Readme
This model doesn't have a readme.
SDXL trained on a small cybertruck dataset
To run this model in Node.js, first install Replicate's client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run hudsongraeme/cybertruck using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"hudsongraeme/cybertruck:4e7b92920cf8bbec4862ccad2f905d83430d1ee54f47261d52e055aeadf6f9da",
{
input: {
width: 1024,
height: 1024,
prompt: "A photo of TOK drifting",
refine: "no_refiner",
scheduler: "K_EULER",
lora_scale: 0.6,
num_outputs: 4,
guidance_scale: 7.5,
apply_watermark: true,
high_noise_frac: 0.8,
negative_prompt: "",
prompt_strength: 0.8,
num_inference_steps: 50
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run hudsongraeme/cybertruck using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"hudsongraeme/cybertruck:4e7b92920cf8bbec4862ccad2f905d83430d1ee54f47261d52e055aeadf6f9da",
input={
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK drifting",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 4,
"guidance_scale": 7.5,
"apply_watermark": True,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run hudsongraeme/cybertruck using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "4e7b92920cf8bbec4862ccad2f905d83430d1ee54f47261d52e055aeadf6f9da",
"input": {
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK drifting",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 4,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/hudsongraeme/cybertruck@sha256:4e7b92920cf8bbec4862ccad2f905d83430d1ee54f47261d52e055aeadf6f9da \
-i 'width=1024' \
-i 'height=1024' \
-i 'prompt="A photo of TOK drifting"' \
-i 'refine="no_refiner"' \
-i 'scheduler="K_EULER"' \
-i 'lora_scale=0.6' \
-i 'num_outputs=4' \
-i 'guidance_scale=7.5' \
-i 'apply_watermark=true' \
-i 'high_noise_frac=0.8' \
-i 'negative_prompt=""' \
-i 'prompt_strength=0.8' \
-i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/hudsongraeme/cybertruck@sha256:4e7b92920cf8bbec4862ccad2f905d83430d1ee54f47261d52e055aeadf6f9da
curl -s -X POST -H "Content-Type: application/json" -d $'{ "input": { "width": 1024, "height": 1024, "prompt": "A photo of TOK drifting", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.6, "num_outputs": 4, "guidance_scale": 7.5, "apply_watermark": true, "high_noise_frac": 0.8, "negative_prompt": "", "prompt_strength": 0.8, "num_inference_steps": 50 } }' http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
Each run costs approximately $0.060. Alternatively, try out our featured models for free.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2023-10-06T17:38:40.838180Z",
"created_at": "2023-10-06T17:37:38.680365Z",
"data_removed": false,
"error": null,
"id": "a4vh2h3btuawwjusgmntecwoxm",
"input": {
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK drifting",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 4,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
},
"logs": "Using seed: 880\nEnsuring enough disk space...\nFree disk space: 3320234479616\nDownloading weights: https://pbxt.replicate.delivery/4h6fsYXIdRXfrkTavb4PfmCshSz7LnHFKsOyVMJXHSWa1kOjA/trained_model.tar\nb''\nDownloaded weights in 0.4663057327270508 seconds\nLoading fine-tuned model\nDoes not have Unet. assume we are using LoRA\nLoading Unet LoRA\nPrompt: A photo of <s0><s1> drifting\ntxt2img mode\n 0%| | 0/50 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/nn/modules/conv.py:459: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.)\nreturn F.conv2d(input, weight, bias, self.stride,\n 2%|▏ | 1/50 [00:01<01:21, 1.67s/it]\n 4%|▍ | 2/50 [00:02<01:02, 1.31s/it]\n 6%|▌ | 3/50 [00:03<00:56, 1.20s/it]\n 8%|▊ | 4/50 [00:04<00:52, 1.15s/it]\n 10%|█ | 5/50 [00:05<00:50, 1.12s/it]\n 12%|█▏ | 6/50 [00:06<00:48, 1.10s/it]\n 14%|█▍ | 7/50 [00:08<00:46, 1.09s/it]\n 16%|█▌ | 8/50 [00:09<00:45, 1.08s/it]\n 18%|█▊ | 9/50 [00:10<00:44, 1.08s/it]\n 20%|██ | 10/50 [00:11<00:42, 1.07s/it]\n 22%|██▏ | 11/50 [00:12<00:41, 1.07s/it]\n 24%|██▍ | 12/50 [00:13<00:40, 1.07s/it]\n 26%|██▌ | 13/50 [00:14<00:39, 1.06s/it]\n 28%|██▊ | 14/50 [00:15<00:38, 1.06s/it]\n 30%|███ | 15/50 [00:16<00:37, 1.06s/it]\n 32%|███▏ | 16/50 [00:17<00:36, 1.06s/it]\n 34%|███▍ | 17/50 [00:18<00:35, 1.06s/it]\n 36%|███▌ | 18/50 [00:19<00:33, 1.06s/it]\n 38%|███▊ | 19/50 [00:20<00:32, 1.06s/it]\n 40%|████ | 20/50 [00:21<00:31, 1.06s/it]\n 42%|████▏ | 21/50 [00:22<00:30, 1.06s/it]\n 44%|████▍ | 22/50 [00:23<00:29, 1.06s/it]\n 46%|████▌ | 23/50 [00:25<00:28, 1.06s/it]\n 48%|████▊ | 24/50 [00:26<00:27, 1.06s/it]\n 50%|█████ | 25/50 [00:27<00:26, 1.06s/it]\n 52%|█████▏ | 26/50 [00:28<00:25, 1.07s/it]\n 54%|█████▍ | 27/50 [00:29<00:24, 1.06s/it]\n 56%|█████▌ | 28/50 [00:30<00:23, 1.07s/it]\n 58%|█████▊ | 29/50 [00:31<00:22, 1.07s/it]\n 60%|██████ | 30/50 [00:32<00:21, 1.07s/it]\n 62%|██████▏ | 31/50 
[00:33<00:20, 1.07s/it]\n 64%|██████▍ | 32/50 [00:34<00:19, 1.07s/it]\n 66%|██████▌ | 33/50 [00:35<00:18, 1.07s/it]\n 68%|██████▊ | 34/50 [00:36<00:17, 1.07s/it]\n 70%|███████ | 35/50 [00:37<00:16, 1.07s/it]\n 72%|███████▏ | 36/50 [00:38<00:14, 1.07s/it]\n 74%|███████▍ | 37/50 [00:40<00:13, 1.07s/it]\n 76%|███████▌ | 38/50 [00:41<00:12, 1.07s/it]\n 78%|███████▊ | 39/50 [00:42<00:11, 1.07s/it]\n 80%|████████ | 40/50 [00:43<00:10, 1.07s/it]\n 82%|████████▏ | 41/50 [00:44<00:09, 1.07s/it]\n 84%|████████▍ | 42/50 [00:45<00:08, 1.07s/it]\n 86%|████████▌ | 43/50 [00:46<00:07, 1.07s/it]\n 88%|████████▊ | 44/50 [00:47<00:06, 1.07s/it]\n 90%|█████████ | 45/50 [00:48<00:05, 1.07s/it]\n 92%|█████████▏| 46/50 [00:49<00:04, 1.07s/it]\n 94%|█████████▍| 47/50 [00:50<00:03, 1.07s/it]\n 96%|█████████▌| 48/50 [00:51<00:02, 1.07s/it]\n 98%|█████████▊| 49/50 [00:52<00:01, 1.07s/it]\n100%|██████████| 50/50 [00:53<00:00, 1.07s/it]\n100%|██████████| 50/50 [00:53<00:00, 1.08s/it]",
"metrics": {
"predict_time": 61.773797,
"total_time": 62.157815
},
"output": [
"https://pbxt.replicate.delivery/ewjeYerw1mBtgoXLONoMRBXX7BfV2POdfAT6BV4fOeBoPwv1IA/out-0.png",
"https://pbxt.replicate.delivery/Jl7HQbx0Jl6FGVEk0jhk0vuLe8l75EtfiY8Z1eTZ8dgfBebNC/out-1.png",
"https://pbxt.replicate.delivery/uLex6VJOQT3VPaXgwqUhCZSTp7VzkfEfJV63gSPVMygABftGB/out-2.png",
"https://pbxt.replicate.delivery/edmvfKyCSohuL0NWJ4gIshyZqsLWenFw3D8TdNMfqlyCCebNC/out-3.png"
],
"started_at": "2023-10-06T17:37:39.064383Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/a4vh2h3btuawwjusgmntecwoxm",
"cancel": "https://api.replicate.com/v1/predictions/a4vh2h3btuawwjusgmntecwoxm/cancel"
},
"version": "50ef505f835eb26967d7f3df96103ee0a90d51eeaea60bf7c2372e6ef70b0d06"
}
Using seed: 880
Ensuring enough disk space...
Free disk space: 3320234479616
Downloading weights: https://pbxt.replicate.delivery/4h6fsYXIdRXfrkTavb4PfmCshSz7LnHFKsOyVMJXHSWa1kOjA/trained_model.tar
b''
Downloaded weights in 0.4663057327270508 seconds
Loading fine-tuned model
Does not have Unet. assume we are using LoRA
Loading Unet LoRA
Prompt: A photo of <s0><s1> drifting
txt2img mode
0%| | 0/50 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/nn/modules/conv.py:459: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.)
return F.conv2d(input, weight, bias, self.stride,
2%|▏ | 1/50 [00:01<01:21, 1.67s/it]
4%|▍ | 2/50 [00:02<01:02, 1.31s/it]
6%|▌ | 3/50 [00:03<00:56, 1.20s/it]
8%|▊ | 4/50 [00:04<00:52, 1.15s/it]
10%|█ | 5/50 [00:05<00:50, 1.12s/it]
12%|█▏ | 6/50 [00:06<00:48, 1.10s/it]
14%|█▍ | 7/50 [00:08<00:46, 1.09s/it]
16%|█▌ | 8/50 [00:09<00:45, 1.08s/it]
18%|█▊ | 9/50 [00:10<00:44, 1.08s/it]
20%|██ | 10/50 [00:11<00:42, 1.07s/it]
22%|██▏ | 11/50 [00:12<00:41, 1.07s/it]
24%|██▍ | 12/50 [00:13<00:40, 1.07s/it]
26%|██▌ | 13/50 [00:14<00:39, 1.06s/it]
28%|██▊ | 14/50 [00:15<00:38, 1.06s/it]
30%|███ | 15/50 [00:16<00:37, 1.06s/it]
32%|███▏ | 16/50 [00:17<00:36, 1.06s/it]
34%|███▍ | 17/50 [00:18<00:35, 1.06s/it]
36%|███▌ | 18/50 [00:19<00:33, 1.06s/it]
38%|███▊ | 19/50 [00:20<00:32, 1.06s/it]
40%|████ | 20/50 [00:21<00:31, 1.06s/it]
42%|████▏ | 21/50 [00:22<00:30, 1.06s/it]
44%|████▍ | 22/50 [00:23<00:29, 1.06s/it]
46%|████▌ | 23/50 [00:25<00:28, 1.06s/it]
48%|████▊ | 24/50 [00:26<00:27, 1.06s/it]
50%|█████ | 25/50 [00:27<00:26, 1.06s/it]
52%|█████▏ | 26/50 [00:28<00:25, 1.07s/it]
54%|█████▍ | 27/50 [00:29<00:24, 1.06s/it]
56%|█████▌ | 28/50 [00:30<00:23, 1.07s/it]
58%|█████▊ | 29/50 [00:31<00:22, 1.07s/it]
60%|██████ | 30/50 [00:32<00:21, 1.07s/it]
62%|██████▏ | 31/50 [00:33<00:20, 1.07s/it]
64%|██████▍ | 32/50 [00:34<00:19, 1.07s/it]
66%|██████▌ | 33/50 [00:35<00:18, 1.07s/it]
68%|██████▊ | 34/50 [00:36<00:17, 1.07s/it]
70%|███████ | 35/50 [00:37<00:16, 1.07s/it]
72%|███████▏ | 36/50 [00:38<00:14, 1.07s/it]
74%|███████▍ | 37/50 [00:40<00:13, 1.07s/it]
76%|███████▌ | 38/50 [00:41<00:12, 1.07s/it]
78%|███████▊ | 39/50 [00:42<00:11, 1.07s/it]
80%|████████ | 40/50 [00:43<00:10, 1.07s/it]
82%|████████▏ | 41/50 [00:44<00:09, 1.07s/it]
84%|████████▍ | 42/50 [00:45<00:08, 1.07s/it]
86%|████████▌ | 43/50 [00:46<00:07, 1.07s/it]
88%|████████▊ | 44/50 [00:47<00:06, 1.07s/it]
90%|█████████ | 45/50 [00:48<00:05, 1.07s/it]
92%|█████████▏| 46/50 [00:49<00:04, 1.07s/it]
94%|█████████▍| 47/50 [00:50<00:03, 1.07s/it]
96%|█████████▌| 48/50 [00:51<00:02, 1.07s/it]
98%|█████████▊| 49/50 [00:52<00:01, 1.07s/it]
100%|██████████| 50/50 [00:53<00:00, 1.07s/it]
100%|██████████| 50/50 [00:53<00:00, 1.08s/it]
This example was created by a different version, hudsongraeme/cybertruck:50ef505f.
This model costs approximately $0.060 to run on Replicate, or 16 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia L40S GPU hardware. Predictions typically complete within 62 seconds. The predict time for this model varies significantly based on the inputs.
This model doesn't have a readme.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
Choose a file from your machine
Hint: you can also drag files onto the input
Choose a file from your machine
Hint: you can also drag files onto the input
Using seed: 880
Ensuring enough disk space...
Free disk space: 3320234479616
Downloading weights: https://pbxt.replicate.delivery/4h6fsYXIdRXfrkTavb4PfmCshSz7LnHFKsOyVMJXHSWa1kOjA/trained_model.tar
b''
Downloaded weights in 0.4663057327270508 seconds
Loading fine-tuned model
Does not have Unet. assume we are using LoRA
Loading Unet LoRA
Prompt: A photo of <s0><s1> drifting
txt2img mode
0%| | 0/50 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/nn/modules/conv.py:459: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.)
return F.conv2d(input, weight, bias, self.stride,
2%|▏ | 1/50 [00:01<01:21, 1.67s/it]
4%|▍ | 2/50 [00:02<01:02, 1.31s/it]
6%|▌ | 3/50 [00:03<00:56, 1.20s/it]
8%|▊ | 4/50 [00:04<00:52, 1.15s/it]
10%|█ | 5/50 [00:05<00:50, 1.12s/it]
12%|█▏ | 6/50 [00:06<00:48, 1.10s/it]
14%|█▍ | 7/50 [00:08<00:46, 1.09s/it]
16%|█▌ | 8/50 [00:09<00:45, 1.08s/it]
18%|█▊ | 9/50 [00:10<00:44, 1.08s/it]
20%|██ | 10/50 [00:11<00:42, 1.07s/it]
22%|██▏ | 11/50 [00:12<00:41, 1.07s/it]
24%|██▍ | 12/50 [00:13<00:40, 1.07s/it]
26%|██▌ | 13/50 [00:14<00:39, 1.06s/it]
28%|██▊ | 14/50 [00:15<00:38, 1.06s/it]
30%|███ | 15/50 [00:16<00:37, 1.06s/it]
32%|███▏ | 16/50 [00:17<00:36, 1.06s/it]
34%|███▍ | 17/50 [00:18<00:35, 1.06s/it]
36%|███▌ | 18/50 [00:19<00:33, 1.06s/it]
38%|███▊ | 19/50 [00:20<00:32, 1.06s/it]
40%|████ | 20/50 [00:21<00:31, 1.06s/it]
42%|████▏ | 21/50 [00:22<00:30, 1.06s/it]
44%|████▍ | 22/50 [00:23<00:29, 1.06s/it]
46%|████▌ | 23/50 [00:25<00:28, 1.06s/it]
48%|████▊ | 24/50 [00:26<00:27, 1.06s/it]
50%|█████ | 25/50 [00:27<00:26, 1.06s/it]
52%|█████▏ | 26/50 [00:28<00:25, 1.07s/it]
54%|█████▍ | 27/50 [00:29<00:24, 1.06s/it]
56%|█████▌ | 28/50 [00:30<00:23, 1.07s/it]
58%|█████▊ | 29/50 [00:31<00:22, 1.07s/it]
60%|██████ | 30/50 [00:32<00:21, 1.07s/it]
62%|██████▏ | 31/50 [00:33<00:20, 1.07s/it]
64%|██████▍ | 32/50 [00:34<00:19, 1.07s/it]
66%|██████▌ | 33/50 [00:35<00:18, 1.07s/it]
68%|██████▊ | 34/50 [00:36<00:17, 1.07s/it]
70%|███████ | 35/50 [00:37<00:16, 1.07s/it]
72%|███████▏ | 36/50 [00:38<00:14, 1.07s/it]
74%|███████▍ | 37/50 [00:40<00:13, 1.07s/it]
76%|███████▌ | 38/50 [00:41<00:12, 1.07s/it]
78%|███████▊ | 39/50 [00:42<00:11, 1.07s/it]
80%|████████ | 40/50 [00:43<00:10, 1.07s/it]
82%|████████▏ | 41/50 [00:44<00:09, 1.07s/it]
84%|████████▍ | 42/50 [00:45<00:08, 1.07s/it]
86%|████████▌ | 43/50 [00:46<00:07, 1.07s/it]
88%|████████▊ | 44/50 [00:47<00:06, 1.07s/it]
90%|█████████ | 45/50 [00:48<00:05, 1.07s/it]
92%|█████████▏| 46/50 [00:49<00:04, 1.07s/it]
94%|█████████▍| 47/50 [00:50<00:03, 1.07s/it]
96%|█████████▌| 48/50 [00:51<00:02, 1.07s/it]
98%|█████████▊| 49/50 [00:52<00:01, 1.07s/it]
100%|██████████| 50/50 [00:53<00:00, 1.07s/it]
100%|██████████| 50/50 [00:53<00:00, 1.08s/it]