Readme
This model doesn't have a readme.
Run this model in Node.js. First, install Replicate's Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import { writeFile } from "node:fs/promises";
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run colinmcdonnell22/redbull_doodles using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"colinmcdonnell22/redbull_doodles:14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591",
{
input: {
model: "dev",
prompt: "a DOODL of people playing soccer in the park",
go_fast: false,
lora_scale: 1,
megapixels: "1",
num_outputs: 1,
aspect_ratio: "1:1",
output_format: "webp",
guidance_scale: 3,
output_quality: 80,
prompt_strength: 0.8,
extra_lora_scale: 1,
num_inference_steps: 28
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
await writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
To run this model in Python, first install Replicate's Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run colinmcdonnell22/redbull_doodles using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"colinmcdonnell22/redbull_doodles:14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591",
input={
"model": "dev",
"prompt": "a DOODL of people playing soccer in the park",
"go_fast": False,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
)
print(output)
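Recent versions of the Python client return file objects rather than plain URL strings. Below is a minimal sketch for saving the generated images to disk, assuming those objects expose read() (older client versions return URLs, in which case you would download them instead):
# Sketch: save each generated image to disk.
# ASSUMPTION: a recent `replicate` client where run() returns file-like
# FileOutput objects exposing read(); older versions return plain URL strings.
for index, item in enumerate(output):
    with open(f"out-{index}.webp", "wb") as f:  # output_format above is "webp"
        f.write(item.read())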
To learn more, take a look at the guide on getting started with Python.
To run this model with Replicate's HTTP API, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run colinmcdonnell22/redbull_doodles using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "colinmcdonnell22/redbull_doodles:14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591",
"input": {
"model": "dev",
"prompt": "a DOODL of people playing soccer in the park",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
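The same request can be sent from any HTTP client. Here is a rough Python sketch using the third-party requests package (an extra dependency; the official client shown above is usually simpler), followed by an example of the prediction object the API returns:
# Sketch only: the raw HTTP call from Python with the third-party `requests` package.
import os
import requests

resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        "Prefer": "wait",  # hold the request open until the prediction finishes
    },
    json={
        "version": "colinmcdonnell22/redbull_doodles:14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591",
        "input": {"prompt": "a DOODL of people playing soccer in the park"},
    },
)
resp.raise_for_status()
print(resp.json()["output"])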
{
"completed_at": "2025-02-01T06:58:01.732668Z",
"created_at": "2025-02-01T06:57:54.696000Z",
"data_removed": false,
"error": null,
"id": "k87kr8sw11rma0cmr16rev0q2r",
"input": {
"model": "dev",
"prompt": "a DOODL of people playing soccer in the park",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
},
"logs": "2025-02-01 06:57:54.944 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2025-02-01 06:57:54.945 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 89%|████████▉ | 272/304 [00:00<00:00, 2714.77it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2519.01it/s]\n2025-02-01 06:57:55.066 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s\n2025-02-01 06:57:55.067 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/ea4b30cdef6fddc0\n2025-02-01 06:57:55.179 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2025-02-01 06:57:55.180 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2025-02-01 06:57:55.180 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 93%|█████████▎| 282/304 [00:00<00:00, 2792.09it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2667.65it/s]\n2025-02-01 06:57:55.294 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.23s\nUsing seed: 38673\n0it [00:00, ?it/s]\n1it [00:00, 8.33it/s]\n2it [00:00, 5.82it/s]\n3it [00:00, 5.31it/s]\n4it [00:00, 5.10it/s]\n5it [00:00, 4.98it/s]\n6it [00:01, 4.91it/s]\n7it [00:01, 4.89it/s]\n8it [00:01, 4.86it/s]\n9it [00:01, 4.83it/s]\n10it [00:01, 4.81it/s]\n11it [00:02, 4.79it/s]\n12it [00:02, 4.78it/s]\n13it [00:02, 4.78it/s]\n14it [00:02, 4.76it/s]\n15it [00:03, 4.75it/s]\n16it [00:03, 4.42it/s]\n17it [00:03, 4.38it/s]\n18it [00:03, 4.47it/s]\n19it [00:03, 4.42it/s]\n20it [00:04, 4.62it/s]\n21it [00:04, 4.65it/s]\n22it [00:04, 4.44it/s]\n23it [00:04, 4.47it/s]\n24it [00:05, 4.50it/s]\n25it [00:05, 4.45it/s]\n26it [00:05, 4.34it/s]\n27it [00:05, 4.41it/s]\n28it [00:06, 4.42it/s]\n28it [00:06, 4.66it/s]\nTotal safe images: 1 out of 1",
"metrics": {
"predict_time": 6.787168632,
"total_time": 7.036668
},
"output": [
"https://replicate.delivery/xezq/HLT8VPbKClZIEFXviv9qunfDu7tQbqIEyiq34yR3Rrq8vbFKA/out-0.webp"
],
"started_at": "2025-02-01T06:57:54.945499Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bsvm-qttwectna6sp2girdktv657r2fzoi3eac2pmdm7mjsjiriuatobq",
"get": "https://api.replicate.com/v1/predictions/k87kr8sw11rma0cmr16rev0q2r",
"cancel": "https://api.replicate.com/v1/predictions/k87kr8sw11rma0cmr16rev0q2r/cancel"
},
"version": "14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591"
}
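The urls.get and urls.cancel fields above can be used to poll or cancel a prediction. With the Python client, roughly the same flow looks like this (a sketch, assuming the client's predictions API; the version hash is the one pinned in the examples above):
import time
import replicate

# Create the prediction without blocking, then poll it by id.
prediction = replicate.predictions.create(
    version="14c616496f87e094a49107a67ef5b7221c25c8bbee980913cbd24e59ff3c2591",
    input={"prompt": "a DOODL of people playing soccer in the park"},
)
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction = replicate.predictions.get(prediction.id)
print(prediction.status, prediction.output)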
Logs from the example prediction above:
2025-02-01 06:57:54.944 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-02-01 06:57:54.945 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 89%|████████▉ | 272/304 [00:00<00:00, 2714.77it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2519.01it/s]
2025-02-01 06:57:55.066 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s
2025-02-01 06:57:55.067 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/ea4b30cdef6fddc0
2025-02-01 06:57:55.179 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded
2025-02-01 06:57:55.180 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-02-01 06:57:55.180 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 93%|█████████▎| 282/304 [00:00<00:00, 2792.09it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2667.65it/s]
2025-02-01 06:57:55.294 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.23s
Using seed: 38673
0it [00:00, ?it/s]
1it [00:00, 8.33it/s]
2it [00:00, 5.82it/s]
3it [00:00, 5.31it/s]
4it [00:00, 5.10it/s]
5it [00:00, 4.98it/s]
6it [00:01, 4.91it/s]
7it [00:01, 4.89it/s]
8it [00:01, 4.86it/s]
9it [00:01, 4.83it/s]
10it [00:01, 4.81it/s]
11it [00:02, 4.79it/s]
12it [00:02, 4.78it/s]
13it [00:02, 4.78it/s]
14it [00:02, 4.76it/s]
15it [00:03, 4.75it/s]
16it [00:03, 4.42it/s]
17it [00:03, 4.38it/s]
18it [00:03, 4.47it/s]
19it [00:03, 4.42it/s]
20it [00:04, 4.62it/s]
21it [00:04, 4.65it/s]
22it [00:04, 4.44it/s]
23it [00:04, 4.47it/s]
24it [00:05, 4.50it/s]
25it [00:05, 4.45it/s]
26it [00:05, 4.34it/s]
27it [00:05, 4.41it/s]
28it [00:06, 4.42it/s]
28it [00:06, 4.66it/s]
Total safe images: 1 out of 1
This model costs approximately $0.017 to run on Replicate, or 58 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia H100 GPU hardware. Predictions typically complete within 11 seconds.
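A rough back-of-the-envelope for that estimate, assuming per-second billing of H100 time (the per-second rate below is an assumption, not a quoted price; check Replicate's pricing page):
# Rough sanity check of the cost estimate above.
# ASSUMPTION: per-second H100 billing at the rate below; not a quoted price.
assumed_h100_usd_per_second = 0.001525
typical_seconds = 11  # "Predictions typically complete within 11 seconds"
estimated_cost = assumed_h100_usd_per_second * typical_seconds
print(f"~${estimated_cost:.3f} per run, ~{1 / estimated_cost:.0f} runs per $1")
# => ~$0.017 per run, ~60 runs per $1 (the page rounds this to 58 runs per $1)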
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.