Failed to load versions. Head to the versions page to see all versions for this model.
You're looking at a specific version of this model. Jump to the model overview.
rinatkurmaev/flux-dev-lora-tatra-t3:25b2bc71
Input
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
// Replicate JavaScript client; `fs` is used further below to save the generated image.
import Replicate from "replicate";
import fs from "node:fs";
// The client authenticates with the REPLICATE_API_TOKEN environment variable
// (find the token in your Replicate account settings).
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run rinatkurmaev/flux-dev-lora-tatra-t3 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Run the model. `replicate.run` resolves once the prediction finishes;
// `output` is an array of file-like outputs, one per generated image.
const output = await replicate.run(
  "rinatkurmaev/flux-dev-lora-tatra-t3:25b2bc71bd11c42a9e7ce2f91995ded93cc12f1f23d2cbb7fff77f3da1bb65c4",
  {
    input: {
      model: "dev",
      width: 1024,
      height: 1024,
      prompt: "A tram model tatra t3 TATRAT3 on a city street. The tram is green and white. The tram is visible from front side",
      go_fast: true,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "png",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28,
    },
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk:
// Fix: the callback-style fs.writeFile throws without a callback argument;
// the promise-based variant also accepts stream/AsyncIterable data such as
// the client's file outputs, and awaiting it surfaces any write error.
await fs.promises.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run rinatkurmaev/flux-dev-lora-tatra-t3 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Input parameters for the flux-dev-lora-tatra-t3 model.
prediction_input = {
    "model": "dev",
    "width": 1024,
    "height": 1024,
    "prompt": "A tram model tatra t3 TATRAT3 on a city street. The tram is green and white. The tram is visible from front side",
    "go_fast": True,
    "lora_scale": 1,
    "megapixels": "1",
    "num_outputs": 1,
    "aspect_ratio": "1:1",
    "output_format": "png",
    "guidance_scale": 3,
    "output_quality": 80,
    "prompt_strength": 0.8,
    "extra_lora_scale": 1,
    "num_inference_steps": 28,
}

# Run the prediction and print the resulting output (a list of file URLs).
output = replicate.run(
    "rinatkurmaev/flux-dev-lora-tatra-t3:25b2bc71bd11c42a9e7ce2f91995ded93cc12f1f23d2cbb7fff77f3da1bb65c4",
    input=prediction_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run rinatkurmaev/flux-dev-lora-tatra-t3 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via the Replicate HTTP API.
# "Prefer: wait" keeps the connection open until the model finishes,
# so the response body is the completed prediction JSON.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "rinatkurmaev/flux-dev-lora-tatra-t3:25b2bc71bd11c42a9e7ce2f91995ded93cc12f1f23d2cbb7fff77f3da1bb65c4",
"input": {
"model": "dev",
"width": 1024,
"height": 1024,
"prompt": "A tram model tatra t3 TATRAT3 on a city street. The tram is green and white. The tram is visible from front side",
"go_fast": true,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "png",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2025-02-07T23:10:38.538050Z",
"created_at": "2025-02-07T23:10:25.748000Z",
"data_removed": false,
"error": null,
"id": "6y3tyvp1thrm80cmwapspcvf58",
"input": {
"model": "dev",
"width": 1024,
"height": 1024,
"prompt": "A tram model tatra t3 TATRAT3 on a city street. The tram is green and white. The tram is visible from front side",
"go_fast": true,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "png",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
},
"logs": "2025-02-07 23:10:25.833 | INFO | fp8.lora_loading:restore_clones:592 - Unloaded 304 layers\n2025-02-07 23:10:25.834 | SUCCESS | fp8.lora_loading:unload_loras:563 - LoRAs unloaded in 0.025s\nfree=28920611713024\nDownloading weights\n2025-02-07T23:10:25Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpw8387t1s/weights url=https://replicate.delivery/xezq/5wg4NzY5IOIuMZ4bhf8BTSUhf8hq4vj2AWECTkhWETqANDNUA/trained_model.tar\n2025-02-07T23:10:35Z | INFO | [ Complete ] dest=/tmp/tmpw8387t1s/weights size=\"215 MB\" total_elapsed=9.180s url=https://replicate.delivery/xezq/5wg4NzY5IOIuMZ4bhf8BTSUhf8hq4vj2AWECTkhWETqANDNUA/trained_model.tar\nDownloaded weights in 9.23s\n2025-02-07 23:10:35.061 | INFO | fp8.lora_loading:convert_lora_weights:502 - Loading LoRA weights for /src/weights-cache/265dc36a7a34f2ad\n2025-02-07 23:10:35.143 | INFO | fp8.lora_loading:convert_lora_weights:523 - LoRA weights loaded\n2025-02-07 23:10:35.143 | DEBUG | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:602 - Extracting keys\n2025-02-07 23:10:35.143 | DEBUG | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:609 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 40%|████ | 123/304 [00:00<00:00, 1224.57it/s]\nApplying LoRA: 81%|████████ | 246/304 [00:00<00:00, 951.82it/s] \nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 966.91it/s]\n2025-02-07 23:10:35.458 | INFO | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:661 - Loading LoRA in fp8\n2025-02-07 23:10:35.458 | SUCCESS | fp8.lora_loading:load_lora:542 - LoRA applied in 0.4s\nrunning quantized prediction\nUsing seed: 1555528026\n 0%| | 0/28 [00:00<?, ?it/s]\n 7%|▋ | 2/28 [00:00<00:01, 17.47it/s]\n 14%|█▍ | 4/28 [00:00<00:01, 12.95it/s]\n 21%|██▏ | 6/28 [00:00<00:01, 11.94it/s]\n 29%|██▊ | 8/28 [00:00<00:01, 11.52it/s]\n 36%|███▌ | 10/28 [00:00<00:01, 11.17it/s]\n 43%|████▎ | 12/28 [00:01<00:01, 10.84it/s]\n 50%|█████ | 14/28 [00:01<00:01, 10.84it/s]\n 
57%|█████▋ | 16/28 [00:01<00:01, 10.86it/s]\n 64%|██████▍ | 18/28 [00:01<00:00, 10.89it/s]\n 71%|███████▏ | 20/28 [00:01<00:00, 10.86it/s]\n 79%|███████▊ | 22/28 [00:01<00:00, 10.74it/s]\n 86%|████████▌ | 24/28 [00:02<00:00, 10.73it/s]\n 93%|█████████▎| 26/28 [00:02<00:00, 10.77it/s]\n100%|██████████| 28/28 [00:02<00:00, 10.80it/s]\n100%|██████████| 28/28 [00:02<00:00, 11.07it/s]\nTotal safe images: 1 out of 1",
"metrics": {
"predict_time": 12.726937058,
"total_time": 12.79005
},
"output": [
"https://replicate.delivery/xezq/509GiRoMeyRcMyD1rKu2vBZqSGkAWZyHN11iJlFVVDS3JiGKA/out-0.png"
],
"started_at": "2025-02-07T23:10:25.811113Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-4aipjgd7puh2y2ybodata7rzgjt2gojffyrzfflaqfatkiax2qdq",
"get": "https://api.replicate.com/v1/predictions/6y3tyvp1thrm80cmwapspcvf58",
"cancel": "https://api.replicate.com/v1/predictions/6y3tyvp1thrm80cmwapspcvf58/cancel"
},
"version": "25b2bc71bd11c42a9e7ce2f91995ded93cc12f1f23d2cbb7fff77f3da1bb65c4"
}
2025-02-07 23:10:25.833 | INFO | fp8.lora_loading:restore_clones:592 - Unloaded 304 layers
2025-02-07 23:10:25.834 | SUCCESS | fp8.lora_loading:unload_loras:563 - LoRAs unloaded in 0.025s
free=28920611713024
Downloading weights
2025-02-07T23:10:25Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpw8387t1s/weights url=https://replicate.delivery/xezq/5wg4NzY5IOIuMZ4bhf8BTSUhf8hq4vj2AWECTkhWETqANDNUA/trained_model.tar
2025-02-07T23:10:35Z | INFO | [ Complete ] dest=/tmp/tmpw8387t1s/weights size="215 MB" total_elapsed=9.180s url=https://replicate.delivery/xezq/5wg4NzY5IOIuMZ4bhf8BTSUhf8hq4vj2AWECTkhWETqANDNUA/trained_model.tar
Downloaded weights in 9.23s
2025-02-07 23:10:35.061 | INFO | fp8.lora_loading:convert_lora_weights:502 - Loading LoRA weights for /src/weights-cache/265dc36a7a34f2ad
2025-02-07 23:10:35.143 | INFO | fp8.lora_loading:convert_lora_weights:523 - LoRA weights loaded
2025-02-07 23:10:35.143 | DEBUG | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:602 - Extracting keys
2025-02-07 23:10:35.143 | DEBUG | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:609 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 40%|████ | 123/304 [00:00<00:00, 1224.57it/s]
Applying LoRA: 81%|████████ | 246/304 [00:00<00:00, 951.82it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 966.91it/s]
2025-02-07 23:10:35.458 | INFO | fp8.lora_loading:apply_lora_to_model_and_optionally_store_clones:661 - Loading LoRA in fp8
2025-02-07 23:10:35.458 | SUCCESS | fp8.lora_loading:load_lora:542 - LoRA applied in 0.4s
running quantized prediction
Using seed: 1555528026
0%| | 0/28 [00:00<?, ?it/s]
7%|▋ | 2/28 [00:00<00:01, 17.47it/s]
14%|█▍ | 4/28 [00:00<00:01, 12.95it/s]
21%|██▏ | 6/28 [00:00<00:01, 11.94it/s]
29%|██▊ | 8/28 [00:00<00:01, 11.52it/s]
36%|███▌ | 10/28 [00:00<00:01, 11.17it/s]
43%|████▎ | 12/28 [00:01<00:01, 10.84it/s]
50%|█████ | 14/28 [00:01<00:01, 10.84it/s]
57%|█████▋ | 16/28 [00:01<00:01, 10.86it/s]
64%|██████▍ | 18/28 [00:01<00:00, 10.89it/s]
71%|███████▏ | 20/28 [00:01<00:00, 10.86it/s]
79%|███████▊ | 22/28 [00:01<00:00, 10.74it/s]
86%|████████▌ | 24/28 [00:02<00:00, 10.73it/s]
93%|█████████▎| 26/28 [00:02<00:00, 10.77it/s]
100%|██████████| 28/28 [00:02<00:00, 10.80it/s]
100%|██████████| 28/28 [00:02<00:00, 11.07it/s]
Total safe images: 1 out of 1