Readme
This model doesn't have a readme.
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import { writeFile } from "node:fs/promises";
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run cristobalascencio/wirra using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"cristobalascencio/wirra:6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e",
{
input: {
model: "dev",
prompt: "A pictogram of a data center in the middle of the composition, in the style of wirra",
go_fast: false,
lora_scale: 1.3,
megapixels: "1",
num_outputs: 2,
aspect_ratio: "1:1",
output_format: "webp",
guidance_scale: 3.5,
output_quality: 80,
prompt_strength: 0.8,
extra_lora_scale: 1,
num_inference_steps: 28
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
await writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run cristobalascencio/wirra using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"cristobalascencio/wirra:6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e",
input={
"model": "dev",
"prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra",
"go_fast": False,
"lora_scale": 1.3,
"megapixels": "1",
"num_outputs": 2,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3.5,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
)
print(output)
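With num_outputs set to 2, output is a list with one entry per generated image. The sketch below saves each one to disk; it assumes a recent version of the Python client, where each entry is a file-like object with a read() method (on older clients the entries are plain URLs you would download instead):

# Minimal sketch: save every generated image to disk.
# Assumes each entry in `output` is a FileOutput with a .read() method;
# on older client versions the entries are URL strings instead.
for i, image in enumerate(output):
    with open(f"out-{i}.webp", "wb") as f:
        f.write(image.read())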
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run cristobalascencio/wirra using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e",
"input": {
"model": "dev",
"prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra",
"go_fast": false,
"lora_scale": 1.3,
"megapixels": "1",
"num_outputs": 2,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3.5,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
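The Prefer: wait header asks the API to hold the connection open until the prediction finishes. For longer jobs you can instead create the prediction and poll its get URL until it reaches a terminal status. A minimal sketch with Python's requests library (the input is abbreviated to just the prompt):

import os
import time

import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction (same version and input shape as the curl example above)
prediction = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers=headers,
    json={
        "version": "6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e",
        "input": {
            "prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra"
        },
    },
).json()

# Poll the "get" URL until the prediction reaches a terminal state
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["output"])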
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/cristobalascencio/wirra@sha256:6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e \
-i 'model="dev"' \
-i 'prompt="A pictogram of a data center in the middle of the composition, in the style of wirra"' \
-i 'go_fast=false' \
-i 'lora_scale=1.3' \
-i 'megapixels="1"' \
-i 'num_outputs=2' \
-i 'aspect_ratio="1:1"' \
-i 'output_format="webp"' \
-i 'guidance_scale=3.5' \
-i 'output_quality=80' \
-i 'prompt_strength=0.8' \
-i 'extra_lora_scale=1' \
-i 'num_inference_steps=28'
To learn more, take a look at the Cog documentation.
Alternatively, run the model as a local HTTP server with Docker:
docker run -d -p 5000:5000 --gpus=all r8.im/cristobalascencio/wirra@sha256:6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "model": "dev",
      "prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra",
      "go_fast": false,
      "lora_scale": 1.3,
      "megapixels": "1",
      "num_outputs": 2,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3.5,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  http://localhost:5000/predictions
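Once the container is up, you can send the same request from Python. A local Cog server typically returns file outputs as base64 data URIs rather than hosted URLs, so the sketch below decodes them before writing to disk (the exact response format can vary by Cog version):

import base64

import requests

resp = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra",
            "num_outputs": 2,
        }
    },
)
resp.raise_for_status()

# Each output is assumed to be a data URI like "data:image/webp;base64,...."
for i, item in enumerate(resp.json()["output"]):
    _, encoded = item.split(",", 1)
    with open(f"out-{i}.webp", "wb") as f:
        f.write(base64.b64decode(encoded))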
To learn more, take a look at the Cog documentation.
{
"completed_at": "2024-08-22T22:42:16.847213Z",
"created_at": "2024-08-22T22:41:48.459000Z",
"data_removed": false,
"error": null,
"id": "f03tkmphndrm20chfh08mzt4mc",
"input": {
"model": "dev",
"prompt": "A pictogram of a data center in the middle of the composition, in the style of wirra",
"lora_scale": 1.3,
"num_outputs": 2,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3.5,
"output_quality": 80,
"num_inference_steps": 28
},
"logs": "Using seed: 23768\nPrompt: A pictogram of a data center in the middle of the composition, in the style of wirra\ntxt2img mode\nUsing dev model\nLoading LoRA weights\nEnsuring enough disk space...\nFree disk space: 9814201135104\nDownloading weights\n2024-08-22T22:41:49Z | INFO | [ Initiating ] chunk_size=150M dest=/src/weights-cache/1187d2c8e3e14db7 url=https://replicate.delivery/yhqm/HEKMeQtqZLx0aSDuhy0eZUtvO0YqRNsbnoAxfCYtJ0AsUtqmA/trained_model.tar\n2024-08-22T22:41:52Z | INFO | [ Complete ] dest=/src/weights-cache/1187d2c8e3e14db7 size=\"172 MB\" total_elapsed=3.181s url=https://replicate.delivery/yhqm/HEKMeQtqZLx0aSDuhy0eZUtvO0YqRNsbnoAxfCYtJ0AsUtqmA/trained_model.tar\nb''\nDownloaded weights in 3.2123372554779053 seconds\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:14, 1.87it/s]\n 7%|▋ | 2/28 [00:00<00:12, 2.13it/s]\n 11%|█ | 3/28 [00:01<00:12, 2.00it/s]\n 14%|█▍ | 4/28 [00:02<00:12, 1.95it/s]\n 18%|█▊ | 5/28 [00:02<00:11, 1.92it/s]\n 21%|██▏ | 6/28 [00:03<00:11, 1.90it/s]\n 25%|██▌ | 7/28 [00:03<00:11, 1.90it/s]\n 29%|██▊ | 8/28 [00:04<00:10, 1.89it/s]\n 32%|███▏ | 9/28 [00:04<00:10, 1.88it/s]\n 36%|███▌ | 10/28 [00:05<00:09, 1.88it/s]\n 39%|███▉ | 11/28 [00:05<00:09, 1.88it/s]\n 43%|████▎ | 12/28 [00:06<00:08, 1.87it/s]\n 46%|████▋ | 13/28 [00:06<00:08, 1.87it/s]\n 50%|█████ | 14/28 [00:07<00:07, 1.87it/s]\n 54%|█████▎ | 15/28 [00:07<00:06, 1.87it/s]\n 57%|█████▋ | 16/28 [00:08<00:06, 1.87it/s]\n 61%|██████ | 17/28 [00:08<00:05, 1.87it/s]\n 64%|██████▍ | 18/28 [00:09<00:05, 1.87it/s]\n 68%|██████▊ | 19/28 [00:10<00:04, 1.87it/s]\n 71%|███████▏ | 20/28 [00:10<00:04, 1.87it/s]\n 75%|███████▌ | 21/28 [00:11<00:03, 1.87it/s]\n 79%|███████▊ | 22/28 [00:11<00:03, 1.86it/s]\n 82%|████████▏ | 23/28 [00:12<00:02, 1.87it/s]\n 86%|████████▌ | 24/28 [00:12<00:02, 1.86it/s]\n 89%|████████▉ | 25/28 [00:13<00:01, 1.86it/s]\n 93%|█████████▎| 26/28 [00:13<00:01, 1.87it/s]\n 96%|█████████▋| 27/28 [00:14<00:00, 1.86it/s]\n100%|██████████| 28/28 [00:14<00:00, 1.87it/s]\n100%|██████████| 28/28 [00:14<00:00, 1.88it/s]",
"metrics": {
"predict_time": 27.372243066,
"total_time": 28.388213
},
"output": [
"https://replicate.delivery/yhqm/lmqMRb1fJpzcJyXz7j3081vXNfZDzgTW5Ed2kqzsER1IDXVTA/out-0.webp",
"https://replicate.delivery/yhqm/OvNzerg2qdWeQEewQyvRfqsPyPM9oEwdQrSd5IPHdtggMcVNB/out-1.webp"
],
"started_at": "2024-08-22T22:41:49.474970Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/f03tkmphndrm20chfh08mzt4mc",
"cancel": "https://api.replicate.com/v1/predictions/f03tkmphndrm20chfh08mzt4mc/cancel"
},
"version": "6e1cf8d7882937eb157800d82d8e5575c27c2c5e28b1d1c663fa773627f8c88e"
}
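The id and urls fields in the response above can be used to look the prediction up again later, for example with the Python client's predictions.get method (the id below is the one from the example):

import replicate

prediction = replicate.predictions.get("f03tkmphndrm20chfh08mzt4mc")
print(prediction.status)  # "succeeded"
print(prediction.output)  # list of output image URLs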
Using seed: 23768
Prompt: A pictogram of a data center in the middle of the composition, in the style of wirra
txt2img mode
Using dev model
Loading LoRA weights
Ensuring enough disk space...
Free disk space: 9814201135104
Downloading weights
2024-08-22T22:41:49Z | INFO | [ Initiating ] chunk_size=150M dest=/src/weights-cache/1187d2c8e3e14db7 url=https://replicate.delivery/yhqm/HEKMeQtqZLx0aSDuhy0eZUtvO0YqRNsbnoAxfCYtJ0AsUtqmA/trained_model.tar
2024-08-22T22:41:52Z | INFO | [ Complete ] dest=/src/weights-cache/1187d2c8e3e14db7 size="172 MB" total_elapsed=3.181s url=https://replicate.delivery/yhqm/HEKMeQtqZLx0aSDuhy0eZUtvO0YqRNsbnoAxfCYtJ0AsUtqmA/trained_model.tar
b''
Downloaded weights in 3.2123372554779053 seconds
LoRA weights loaded successfully
0%| | 0/28 [00:00<?, ?it/s]
4%|▎ | 1/28 [00:00<00:14, 1.87it/s]
7%|▋ | 2/28 [00:00<00:12, 2.13it/s]
11%|█ | 3/28 [00:01<00:12, 2.00it/s]
14%|█▍ | 4/28 [00:02<00:12, 1.95it/s]
18%|█▊ | 5/28 [00:02<00:11, 1.92it/s]
21%|██▏ | 6/28 [00:03<00:11, 1.90it/s]
25%|██▌ | 7/28 [00:03<00:11, 1.90it/s]
29%|██▊ | 8/28 [00:04<00:10, 1.89it/s]
32%|███▏ | 9/28 [00:04<00:10, 1.88it/s]
36%|███▌ | 10/28 [00:05<00:09, 1.88it/s]
39%|███▉ | 11/28 [00:05<00:09, 1.88it/s]
43%|████▎ | 12/28 [00:06<00:08, 1.87it/s]
46%|████▋ | 13/28 [00:06<00:08, 1.87it/s]
50%|█████ | 14/28 [00:07<00:07, 1.87it/s]
54%|█████▎ | 15/28 [00:07<00:06, 1.87it/s]
57%|█████▋ | 16/28 [00:08<00:06, 1.87it/s]
61%|██████ | 17/28 [00:08<00:05, 1.87it/s]
64%|██████▍ | 18/28 [00:09<00:05, 1.87it/s]
68%|██████▊ | 19/28 [00:10<00:04, 1.87it/s]
71%|███████▏ | 20/28 [00:10<00:04, 1.87it/s]
75%|███████▌ | 21/28 [00:11<00:03, 1.87it/s]
79%|███████▊ | 22/28 [00:11<00:03, 1.86it/s]
82%|████████▏ | 23/28 [00:12<00:02, 1.87it/s]
86%|████████▌ | 24/28 [00:12<00:02, 1.86it/s]
89%|████████▉ | 25/28 [00:13<00:01, 1.86it/s]
93%|█████████▎| 26/28 [00:13<00:01, 1.87it/s]
96%|█████████▋| 27/28 [00:14<00:00, 1.86it/s]
100%|██████████| 28/28 [00:14<00:00, 1.87it/s]
100%|██████████| 28/28 [00:14<00:00, 1.88it/s]
This model runs on Nvidia H100 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.