Readme
Doodles trained on black line drawings, fashion illustrations, and wire sculptures. Simple images for complex intellectuals, luxury brands, B2B marketing, and SaaS.
Run this model in Node.js. First, install Replicate's Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run callmejz-ai/doodle using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"callmejz-ai/doodle:b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8",
{
input: {
width: 1024,
height: 1024,
prompt: "flower",
refine: "no_refiner",
scheduler: "K_EULER",
lora_scale: 0.6,
num_outputs: 1,
guidance_scale: 7.5,
apply_watermark: true,
high_noise_frac: 0.8,
negative_prompt: "",
prompt_strength: 0.8,
num_inference_steps: 50
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
fs.writeFile("my-image.png", output[0], (err) => {
  if (err) throw err;
});
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run callmejz-ai/doodle using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"callmejz-ai/doodle:b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8",
input={
"width": 1024,
"height": 1024,
"prompt": "flower",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": True,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
)
print(output)
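The output printed above is a list with one entry per generated image. If you want the files on disk rather than just their URLs, here is a minimal sketch; it assumes each entry is either a URL string or a file object exposing a url attribute, and the out-{i}.png filenames are purely illustrative:

import urllib.request

# `output` is the list returned by replicate.run() above. Depending on the
# client version, entries may be plain URL strings or file objects with a
# `url` attribute (an assumption); handle both.
for i, item in enumerate(output):
    url = item if isinstance(item, str) else item.url
    urllib.request.urlretrieve(url, f"out-{i}.png")
    print(f"saved out-{i}.png from {url}")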
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run callmejz-ai/doodle using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "callmejz-ai/doodle:b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8",
"input": {
"width": 1024,
"height": 1024,
"prompt": "flower",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
}' \
https://api.replicate.com/v1/predictions
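The same request works from any HTTP client. Below is a minimal Python sketch of the call above using the requests package (an assumption; any HTTP library will do). With Prefer: wait the API tries to return the finished prediction in the response; if the model is still booting or running when the wait window ends, you can poll the prediction's urls.get endpoint:

import os
import time
import requests

API_URL = "https://api.replicate.com/v1/predictions"
headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
    "Prefer": "wait",  # ask the API to hold the connection until the prediction finishes
}
body = {
    "version": "callmejz-ai/doodle:b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8",
    "input": {"prompt": "flower", "width": 1024, "height": 1024, "num_inference_steps": 50},
}

prediction = requests.post(API_URL, json=body, headers=headers).json()

# If the model was cold and the wait window elapsed, keep polling the
# prediction's own URL until it reaches a terminal status.
while prediction.get("status") not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction.get("output"))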
To learn more, take a look at Replicate’s HTTP API reference docs.
Example prediction (JSON):
{
"completed_at": "2024-10-25T21:09:49.387587Z",
"created_at": "2024-10-25T21:09:20.611000Z",
"data_removed": false,
"error": null,
"id": "4dhywgsacdrgm0cjrp1s333qkc",
"input": {
"width": 1024,
"height": 1024,
"prompt": "flower",
"refine": "no_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "",
"prompt_strength": 0.8,
"num_inference_steps": 50
},
"logs": "Using seed: 7047\nEnsuring enough disk space...\nFree disk space: 1439622897664\nDownloading weights: https://replicate.delivery/pbxt/WZoBJcam9jK2OtB0Loes20TaJNuR8C87mhikU4gLnbTU7M1JA/trained_model.tar\n2024-10-25T21:09:27Z | INFO | [ Initiating ] chunk_size=150M dest=/src/weights-cache/b9e40f01def7cc54 url=https://replicate.delivery/pbxt/WZoBJcam9jK2OtB0Loes20TaJNuR8C87mhikU4gLnbTU7M1JA/trained_model.tar\n2024-10-25T21:09:32Z | INFO | [ Complete ] dest=/src/weights-cache/b9e40f01def7cc54 size=\"186 MB\" total_elapsed=4.907s url=https://replicate.delivery/pbxt/WZoBJcam9jK2OtB0Loes20TaJNuR8C87mhikU4gLnbTU7M1JA/trained_model.tar\nb''\nDownloaded weights in 5.0415003299713135 seconds\nLoading fine-tuned model\nDoes not have Unet. assume we are using LoRA\nLoading Unet LoRA\nPrompt: flower\ntxt2img mode\n 0%| | 0/50 [00:00<?, ?it/s]/usr/local/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1946: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using `LoraLoaderMixin.load_lora_weights`\ndeprecate(\n 2%|▏ | 1/50 [00:00<00:11, 4.20it/s]\n 4%|▍ | 2/50 [00:00<00:11, 4.19it/s]\n 6%|▌ | 3/50 [00:00<00:11, 4.17it/s]\n 8%|▊ | 4/50 [00:00<00:11, 4.16it/s]\n 10%|█ | 5/50 [00:01<00:10, 4.16it/s]\n 12%|█▏ | 6/50 [00:01<00:10, 4.16it/s]\n 14%|█▍ | 7/50 [00:01<00:10, 4.16it/s]\n 16%|█▌ | 8/50 [00:01<00:10, 4.15it/s]\n 18%|█▊ | 9/50 [00:02<00:09, 4.15it/s]\n 20%|██ | 10/50 [00:02<00:09, 4.15it/s]\n 22%|██▏ | 11/50 [00:02<00:09, 4.15it/s]\n 24%|██▍ | 12/50 [00:02<00:09, 4.16it/s]\n 26%|██▌ | 13/50 [00:03<00:08, 4.16it/s]\n 28%|██▊ | 14/50 [00:03<00:08, 4.15it/s]\n 30%|███ | 15/50 [00:03<00:08, 4.15it/s]\n 32%|███▏ | 16/50 [00:03<00:08, 4.16it/s]\n 34%|███▍ | 17/50 [00:04<00:07, 4.15it/s]\n 36%|███▌ | 18/50 [00:04<00:07, 4.15it/s]\n 38%|███▊ | 19/50 [00:04<00:07, 4.15it/s]\n 40%|████ | 20/50 [00:04<00:07, 4.15it/s]\n 42%|████▏ | 21/50 [00:05<00:06, 4.15it/s]\n 44%|████▍ | 22/50 [00:05<00:06, 4.15it/s]\n 46%|████▌ | 23/50 [00:05<00:06, 4.15it/s]\n 48%|████▊ | 24/50 [00:05<00:06, 4.15it/s]\n 50%|█████ | 25/50 [00:06<00:06, 4.15it/s]\n 52%|█████▏ | 26/50 [00:06<00:05, 4.15it/s]\n 54%|█████▍ | 27/50 [00:06<00:05, 4.15it/s]\n 56%|█████▌ | 28/50 [00:06<00:05, 4.15it/s]\n 58%|█████▊ | 29/50 [00:06<00:05, 4.15it/s]\n 60%|██████ | 30/50 [00:07<00:04, 4.15it/s]\n 62%|██████▏ | 31/50 [00:07<00:04, 4.15it/s]\n 64%|██████▍ | 32/50 [00:07<00:04, 4.14it/s]\n 66%|██████▌ | 33/50 [00:07<00:04, 4.14it/s]\n 68%|██████▊ | 34/50 [00:08<00:03, 4.15it/s]\n 70%|███████ | 35/50 [00:08<00:03, 4.15it/s]\n 72%|███████▏ | 36/50 [00:08<00:03, 4.14it/s]\n 74%|███████▍ | 37/50 [00:08<00:03, 4.14it/s]\n 76%|███████▌ | 38/50 [00:09<00:02, 4.15it/s]\n 78%|███████▊ | 39/50 [00:09<00:02, 4.15it/s]\n 80%|████████ | 40/50 [00:09<00:02, 4.15it/s]\n 82%|████████▏ | 41/50 [00:09<00:02, 4.15it/s]\n 84%|████████▍ | 42/50 [00:10<00:01, 4.14it/s]\n 86%|████████▌ | 43/50 [00:10<00:01, 4.14it/s]\n 88%|████████▊ | 44/50 [00:10<00:01, 4.14it/s]\n 90%|█████████ | 45/50 [00:10<00:01, 4.15it/s]\n 92%|█████████▏| 46/50 [00:11<00:00, 4.14it/s]\n 94%|█████████▍| 47/50 [00:11<00:00, 4.14it/s]\n 96%|█████████▌| 48/50 [00:11<00:00, 4.14it/s]\n 98%|█████████▊| 49/50 [00:11<00:00, 4.14it/s]\n100%|██████████| 50/50 [00:12<00:00, 4.14it/s]\n100%|██████████| 50/50 [00:12<00:00, 4.15it/s]",
"metrics": {
"predict_time": 21.646749119,
"total_time": 28.776587
},
"output": [
"https://replicate.delivery/pbxt/mwnCo7UxFypqBZTaNPfsfVbamEAzHvNGgsukppxa1ZycsbqTA/out-0.png"
],
"started_at": "2024-10-25T21:09:27.740838Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/4dhywgsacdrgm0cjrp1s333qkc",
"cancel": "https://api.replicate.com/v1/predictions/4dhywgsacdrgm0cjrp1s333qkc/cancel"
},
"version": "b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8"
}
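A finished prediction looks like the JSON above. The fields you will usually read are status, output (the generated image URLs), and metrics.predict_time. For example, assuming prediction holds that parsed JSON (e.g. the dict returned by the requests sketch earlier):

# `prediction` is the parsed JSON of a finished prediction, as shown above.
if prediction["status"] == "succeeded":
    image_urls = prediction["output"]                     # list of generated image URLs
    predict_time = prediction["metrics"]["predict_time"]  # seconds of predict time
    print(f"{len(image_urls)} image(s) in {predict_time:.1f}s:", image_urls[0])
else:
    print("prediction did not succeed:", prediction.get("error"))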
This model runs on Nvidia L40S GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
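If you would rather not block on a cold boot, one option is to create the prediction asynchronously and poll it yourself. A minimal Python sketch, assuming the predictions.create and reload methods of the replicate client (the input mirrors the example above):

import time
import replicate

# Create the prediction without waiting for it to finish (assumes the
# predictions.create / reload interface of the replicate Python client).
prediction = replicate.predictions.create(
    version="b9e155a586824e58f5a5193d65b0992ae5b6e5ef7420c1a967638922c4e103a8",
    input={"prompt": "flower", "width": 1024, "height": 1024},
)

while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(2)          # cold boots can add noticeable startup time
    prediction.reload()    # refresh the status from the API

print(prediction.status, prediction.output)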