lucataco/flux-queso
A Flux LoRA trained on photos of Jake's dog: Queso
- Public
- 303 runs
- H100
Prediction
lucataco/flux-queso:d1d8238f
ID: d6hb0qnhdxrm00chazbsnj9pbg
Status: Succeeded
Source: Web
Hardware: H100
Input
- model: dev
- prompt: TOK, a vibrant watercolor painting of TOK lounging on a beach
- lora_scale: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 2
- output_quality: 80
- num_inference_steps: 28
{ "model": "dev", "prompt": "TOK, a vibrant watercolor painting of TOK lounging on a beach", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 2, "output_quality": 80, "num_inference_steps": 28 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", { input: { model: "dev", prompt: "TOK, a vibrant watercolor painting of TOK lounging on a beach", lora_scale: 1, num_outputs: 1, aspect_ratio: "1:1", output_format: "webp", guidance_scale: 2, output_quality: 80, num_inference_steps: 28 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", input={ "model": "dev", "prompt": "TOK, a vibrant watercolor painting of TOK lounging on a beach", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 2, "output_quality": 80, "num_inference_steps": 28 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
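The output is a list of URLs pointing to the generated images, as shown in the example output below. If you want to keep a result locally, here is a minimal sketch of downloading it with the requests library, continuing from the output variable returned by replicate.run above. It assumes the output is a list of plain URL strings (newer versions of the replicate client may return file-like objects instead), and the out-{i}.webp filename is purely illustrative:

# Minimal sketch (assumption: `output` is a list of image URLs, as in the
# example output below; newer replicate client versions may instead return
# file-like objects that expose a .read() method).
import requests

for i, url in enumerate(output):
    resp = requests.get(url, timeout=60)
    resp.raise_for_status()
    filename = f"out-{i}.webp"  # illustrative filename
    with open(filename, "wb") as f:
        f.write(resp.content)
    print(f"Saved {filename}")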
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", "input": { "model": "dev", "prompt": "TOK, a vibrant watercolor painting of TOK lounging on a beach", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 2, "output_quality": 80, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
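The Prefer: wait header above asks the API to wait for the prediction before responding. If you would rather not block on that request, you can create the prediction and then poll its get URL until it reaches a terminal status. Below is a minimal Python sketch of that flow using the requests library; the endpoint, version, and input fields mirror the cURL call above, while the polling loop itself is an illustrative pattern rather than part of Replicate's official clients:

# Minimal sketch: create a prediction over raw HTTP and poll until it is done.
# Mirrors the cURL request above; the polling loop is an illustrative pattern.
import os
import time
import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}
payload = {
    "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab",
    "input": {
        "model": "dev",
        "prompt": "TOK, a vibrant watercolor painting of TOK lounging on a beach",
        "lora_scale": 1,
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 2,
        "output_quality": 80,
        "num_inference_steps": 28,
    },
}

prediction = requests.post(
    "https://api.replicate.com/v1/predictions", headers=headers, json=payload
).json()

# Poll the prediction's "get" URL until it reaches a terminal status.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["status"], prediction.get("output"))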
Output
{ "completed_at": "2024-08-15T21:00:55.942102Z", "created_at": "2024-08-15T21:00:39.151000Z", "data_removed": false, "error": null, "id": "d6hb0qnhdxrm00chazbsnj9pbg", "input": { "model": "dev", "prompt": "TOK, a vibrant watercolor painting of TOK lounging on a beach", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 2, "output_quality": 80, "num_inference_steps": 28 }, "logs": "Using seed: 23002\nPrompt: TOK, a vibrant watercolor painting of TOK lounging on a beach\ntxt2img mode\nUsing dev model\nLoading LoRA weights from https://replicate.delivery/yhqm/xIUPCppeslXbaC6D8hzONTPKFURik2zRMLmmif0GRq2f55lmA/trained_model.tar\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:07, 3.68it/s]\n 7%|▋ | 2/28 [00:00<00:06, 4.21it/s]\n 11%|█ | 3/28 [00:00<00:06, 3.93it/s]\n 14%|█▍ | 4/28 [00:01<00:06, 3.83it/s]\n 18%|█▊ | 5/28 [00:01<00:06, 3.77it/s]\n 21%|██▏ | 6/28 [00:01<00:05, 3.72it/s]\n 25%|██▌ | 7/28 [00:01<00:05, 3.70it/s]\n 29%|██▊ | 8/28 [00:02<00:05, 3.69it/s]\n 32%|███▏ | 9/28 [00:02<00:05, 3.68it/s]\n 36%|███▌ | 10/28 [00:02<00:04, 3.67it/s]\n 39%|███▉ | 11/28 [00:02<00:04, 3.67it/s]\n 43%|████▎ | 12/28 [00:03<00:04, 3.67it/s]\n 46%|████▋ | 13/28 [00:03<00:04, 3.67it/s]\n 50%|█████ | 14/28 [00:03<00:03, 3.66it/s]\n 54%|█████▎ | 15/28 [00:04<00:03, 3.66it/s]\n 57%|█████▋ | 16/28 [00:04<00:03, 3.66it/s]\n 61%|██████ | 17/28 [00:04<00:03, 3.66it/s]\n 64%|██████▍ | 18/28 [00:04<00:02, 3.66it/s]\n 68%|██████▊ | 19/28 [00:05<00:02, 3.65it/s]\n 71%|███████▏ | 20/28 [00:05<00:02, 3.66it/s]\n 75%|███████▌ | 21/28 [00:05<00:01, 3.66it/s]\n 79%|███████▊ | 22/28 [00:05<00:01, 3.65it/s]\n 82%|████████▏ | 23/28 [00:06<00:01, 3.65it/s]\n 86%|████████▌ | 24/28 [00:06<00:01, 3.66it/s]\n 89%|████████▉ | 25/28 [00:06<00:00, 3.66it/s]\n 93%|█████████▎| 26/28 [00:07<00:00, 3.66it/s]\n 96%|█████████▋| 27/28 [00:07<00:00, 3.65it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.66it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.69it/s]", "metrics": { "predict_time": 16.733405885, "total_time": 16.791102 }, "output": [ "https://replicate.delivery/yhqm/oj6PHkrzeH1BPS5620ZlDtv1ZyceltPe7nXX5YSzKfWeQPYaC/out-0.webp" ], "started_at": "2024-08-15T21:00:39.208696Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/d6hb0qnhdxrm00chazbsnj9pbg", "cancel": "https://api.replicate.com/v1/predictions/d6hb0qnhdxrm00chazbsnj9pbg/cancel" }, "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab" }
Prediction
lucataco/flux-queso:d1d8238f
ID: 7ggf05rehhrm00chatmrgtqf7g
Status: Succeeded
Source: Web
Hardware: H100
Input
- model: dev
- prompt: a photo of TOK
- lora_scale: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3.5
- output_quality: 80
- num_inference_steps: 28
{ "model": "dev", "prompt": "a photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", { input: { model: "dev", prompt: "a photo of TOK", lora_scale: 1, num_outputs: 1, aspect_ratio: "1:1", output_format: "webp", guidance_scale: 3.5, output_quality: 80, num_inference_steps: 28 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", input={ "model": "dev", "prompt": "a photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", "input": { "model": "dev", "prompt": "a photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-08-15T15:30:21.905946Z", "created_at": "2024-08-15T15:30:05.580000Z", "data_removed": false, "error": null, "id": "7ggf05rehhrm00chatmrgtqf7g", "input": { "model": "dev", "prompt": "a photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }, "logs": "Using seed: 54588\nPrompt: a photo of TOK\ntxt2img mode\nUsing dev model\nLoading LoRA weights from https://replicate.delivery/yhqm/xIUPCppeslXbaC6D8hzONTPKFURik2zRMLmmif0GRq2f55lmA/trained_model.tar\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:07, 3.69it/s]\n 7%|▋ | 2/28 [00:00<00:06, 4.23it/s]\n 11%|█ | 3/28 [00:00<00:06, 3.95it/s]\n 14%|█▍ | 4/28 [00:01<00:06, 3.84it/s]\n 18%|█▊ | 5/28 [00:01<00:06, 3.78it/s]\n 21%|██▏ | 6/28 [00:01<00:05, 3.74it/s]\n 25%|██▌ | 7/28 [00:01<00:05, 3.71it/s]\n 29%|██▊ | 8/28 [00:02<00:05, 3.70it/s]\n 32%|███▏ | 9/28 [00:02<00:05, 3.69it/s]\n 36%|███▌ | 10/28 [00:02<00:04, 3.68it/s]\n 39%|███▉ | 11/28 [00:02<00:04, 3.68it/s]\n 43%|████▎ | 12/28 [00:03<00:04, 3.68it/s]\n 46%|████▋ | 13/28 [00:03<00:04, 3.68it/s]\n 50%|█████ | 14/28 [00:03<00:03, 3.67it/s]\n 54%|█████▎ | 15/28 [00:04<00:03, 3.67it/s]\n 57%|█████▋ | 16/28 [00:04<00:03, 3.67it/s]\n 61%|██████ | 17/28 [00:04<00:02, 3.68it/s]\n 64%|██████▍ | 18/28 [00:04<00:02, 3.67it/s]\n 68%|██████▊ | 19/28 [00:05<00:02, 3.66it/s]\n 71%|███████▏ | 20/28 [00:05<00:02, 3.67it/s]\n 75%|███████▌ | 21/28 [00:05<00:01, 3.67it/s]\n 79%|███████▊ | 22/28 [00:05<00:01, 3.67it/s]\n 82%|████████▏ | 23/28 [00:06<00:01, 3.67it/s]\n 86%|████████▌ | 24/28 [00:06<00:01, 3.67it/s]\n 89%|████████▉ | 25/28 [00:06<00:00, 3.67it/s]\n 93%|█████████▎| 26/28 [00:07<00:00, 3.67it/s]\n 96%|█████████▋| 27/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.70it/s]", "metrics": { "predict_time": 16.285146017, "total_time": 16.325946 }, "output": [ "https://replicate.delivery/yhqm/mnzQ7vErFT6dEdnQdzGsebGTGgFUYfxhJxsqAEswH8uNE9STA/out-0.webp" ], "started_at": "2024-08-15T15:30:05.620800Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/7ggf05rehhrm00chatmrgtqf7g", "cancel": "https://api.replicate.com/v1/predictions/7ggf05rehhrm00chatmrgtqf7g/cancel" }, "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab" }
Prediction
lucataco/flux-queso:d1d8238f
ID: fsh3j8mnhnrm40chatn9hzc7qw
Status: Succeeded
Source: Web
Hardware: H100
Input
- model: dev
- prompt: a photo of TOK running
- lora_scale: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3.5
- output_quality: 80
- num_inference_steps: 28
{ "model": "dev", "prompt": "a photo of TOK running", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", { input: { model: "dev", prompt: "a photo of TOK running", lora_scale: 1, num_outputs: 1, aspect_ratio: "1:1", output_format: "webp", guidance_scale: 3.5, output_quality: 80, num_inference_steps: 28 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", input={ "model": "dev", "prompt": "a photo of TOK running", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", "input": { "model": "dev", "prompt": "a photo of TOK running", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-08-15T15:32:01.430608Z", "created_at": "2024-08-15T15:31:45.677000Z", "data_removed": false, "error": null, "id": "fsh3j8mnhnrm40chatn9hzc7qw", "input": { "model": "dev", "prompt": "a photo of TOK running", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }, "logs": "Using seed: 28944\nPrompt: a photo of TOK running\ntxt2img mode\nUsing dev model\nLoading LoRA weights from https://replicate.delivery/yhqm/xIUPCppeslXbaC6D8hzONTPKFURik2zRMLmmif0GRq2f55lmA/trained_model.tar\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:07, 3.69it/s]\n 7%|▋ | 2/28 [00:00<00:06, 4.23it/s]\n 11%|█ | 3/28 [00:00<00:06, 3.95it/s]\n 14%|█▍ | 4/28 [00:01<00:06, 3.84it/s]\n 18%|█▊ | 5/28 [00:01<00:06, 3.78it/s]\n 21%|██▏ | 6/28 [00:01<00:05, 3.74it/s]\n 25%|██▌ | 7/28 [00:01<00:05, 3.71it/s]\n 29%|██▊ | 8/28 [00:02<00:05, 3.70it/s]\n 32%|███▏ | 9/28 [00:02<00:05, 3.70it/s]\n 36%|███▌ | 10/28 [00:02<00:04, 3.69it/s]\n 39%|███▉ | 11/28 [00:02<00:04, 3.68it/s]\n 43%|████▎ | 12/28 [00:03<00:04, 3.68it/s]\n 46%|████▋ | 13/28 [00:03<00:04, 3.68it/s]\n 50%|█████ | 14/28 [00:03<00:03, 3.67it/s]\n 54%|█████▎ | 15/28 [00:04<00:03, 3.67it/s]\n 57%|█████▋ | 16/28 [00:04<00:03, 3.67it/s]\n 61%|██████ | 17/28 [00:04<00:02, 3.68it/s]\n 64%|██████▍ | 18/28 [00:04<00:02, 3.67it/s]\n 68%|██████▊ | 19/28 [00:05<00:02, 3.67it/s]\n 71%|███████▏ | 20/28 [00:05<00:02, 3.67it/s]\n 75%|███████▌ | 21/28 [00:05<00:01, 3.68it/s]\n 79%|███████▊ | 22/28 [00:05<00:01, 3.67it/s]\n 82%|████████▏ | 23/28 [00:06<00:01, 3.67it/s]\n 86%|████████▌ | 24/28 [00:06<00:01, 3.67it/s]\n 89%|████████▉ | 25/28 [00:06<00:00, 3.67it/s]\n 93%|█████████▎| 26/28 [00:07<00:00, 3.67it/s]\n 96%|█████████▋| 27/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.70it/s]", "metrics": { "predict_time": 15.711233138, "total_time": 15.753608 }, "output": [ "https://replicate.delivery/yhqm/AlUklq0ETZZKGpRwpHEfWuhTxwVgLZ1M1B773ehFsNrxF9STA/out-0.webp" ], "started_at": "2024-08-15T15:31:45.719375Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/fsh3j8mnhnrm40chatn9hzc7qw", "cancel": "https://api.replicate.com/v1/predictions/fsh3j8mnhnrm40chatn9hzc7qw/cancel" }, "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab" }
Prediction
lucataco/flux-queso:d1d8238f
Input
- model: dev
- prompt: a portrait photo of TOK
- lora_scale: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3.5
- output_quality: 80
- num_inference_steps: 28
{ "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", { input: { model: "dev", prompt: "a portrait photo of TOK", lora_scale: 1, num_outputs: 1, aspect_ratio: "1:1", output_format: "webp", guidance_scale: 3.5, output_quality: 80, num_inference_steps: 28 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", input={ "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", "input": { "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-08-15T17:41:53.406277Z", "created_at": "2024-08-15T17:41:36.536000Z", "data_removed": false, "error": null, "id": "z7agb9bpk1rm00chawgv5p0c0r", "input": { "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }, "logs": "Using seed: 24348\nPrompt: a portrait photo of TOK\ntxt2img mode\nUsing dev model\nLoading LoRA weights from https://replicate.delivery/yhqm/xIUPCppeslXbaC6D8hzONTPKFURik2zRMLmmif0GRq2f55lmA/trained_model.tar\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:07, 3.68it/s]\n 7%|▋ | 2/28 [00:00<00:06, 4.23it/s]\n 11%|█ | 3/28 [00:00<00:06, 3.95it/s]\n 14%|█▍ | 4/28 [00:01<00:06, 3.84it/s]\n 18%|█▊ | 5/28 [00:01<00:06, 3.78it/s]\n 21%|██▏ | 6/28 [00:01<00:05, 3.74it/s]\n 25%|██▌ | 7/28 [00:01<00:05, 3.72it/s]\n 29%|██▊ | 8/28 [00:02<00:05, 3.71it/s]\n 32%|███▏ | 9/28 [00:02<00:05, 3.70it/s]\n 36%|███▌ | 10/28 [00:02<00:04, 3.69it/s]\n 39%|███▉ | 11/28 [00:02<00:04, 3.68it/s]\n 43%|████▎ | 12/28 [00:03<00:04, 3.68it/s]\n 46%|████▋ | 13/28 [00:03<00:04, 3.67it/s]\n 50%|█████ | 14/28 [00:03<00:03, 3.67it/s]\n 54%|█████▎ | 15/28 [00:04<00:03, 3.67it/s]\n 57%|█████▋ | 16/28 [00:04<00:03, 3.67it/s]\n 61%|██████ | 17/28 [00:04<00:02, 3.67it/s]\n 64%|██████▍ | 18/28 [00:04<00:02, 3.67it/s]\n 68%|██████▊ | 19/28 [00:05<00:02, 3.67it/s]\n 71%|███████▏ | 20/28 [00:05<00:02, 3.67it/s]\n 75%|███████▌ | 21/28 [00:05<00:01, 3.67it/s]\n 79%|███████▊ | 22/28 [00:05<00:01, 3.67it/s]\n 82%|████████▏ | 23/28 [00:06<00:01, 3.67it/s]\n 86%|████████▌ | 24/28 [00:06<00:01, 3.67it/s]\n 89%|████████▉ | 25/28 [00:06<00:00, 3.67it/s]\n 93%|█████████▎| 26/28 [00:07<00:00, 3.67it/s]\n 96%|█████████▋| 27/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.67it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.70it/s]", "metrics": { "predict_time": 16.815343543, "total_time": 16.870277 }, "output": [ "https://replicate.delivery/yhqm/w9AxnTDZFar0L58aIs6cHSGtP8AiwWteAMeVz8Rld8dhf9lmA/out-0.webp" ], "started_at": "2024-08-15T17:41:36.590933Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/z7agb9bpk1rm00chawgv5p0c0r", "cancel": "https://api.replicate.com/v1/predictions/z7agb9bpk1rm00chawgv5p0c0r/cancel" }, "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab" }
Prediction
lucataco/flux-queso:d1d8238f
ID: bxywcdhcs1rm40chawt9knhyf8
Status: Succeeded
Source: Web
Hardware: H100
Input
- model: dev
- prompt: a portrait photo of TOK
- lora_scale: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3.5
- output_quality: 80
- num_inference_steps: 28
{ "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", { input: { model: "dev", prompt: "a portrait photo of TOK", lora_scale: 1, num_outputs: 1, aspect_ratio: "1:1", output_format: "webp", guidance_scale: 3.5, output_quality: 80, num_inference_steps: 28 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "lucataco/flux-queso:d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", input={ "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run lucataco/flux-queso using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab", "input": { "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-08-15T18:03:06.751104Z", "created_at": "2024-08-15T18:02:02.824000Z", "data_removed": false, "error": null, "id": "bxywcdhcs1rm40chawt9knhyf8", "input": { "model": "dev", "prompt": "a portrait photo of TOK", "lora_scale": 1, "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3.5, "output_quality": 80, "num_inference_steps": 28 }, "logs": "Using seed: 33379\nPrompt: a portrait photo of TOK\ntxt2img mode\nUsing dev model\nLoading LoRA weights from https://replicate.delivery/yhqm/xIUPCppeslXbaC6D8hzONTPKFURik2zRMLmmif0GRq2f55lmA/trained_model.tar\nLoRA weights loaded successfully\n 0%| | 0/28 [00:00<?, ?it/s]\n 4%|▎ | 1/28 [00:00<00:07, 3.68it/s]\n 7%|▋ | 2/28 [00:00<00:06, 4.24it/s]\n 11%|█ | 3/28 [00:00<00:06, 3.95it/s]\n 14%|█▍ | 4/28 [00:01<00:06, 3.82it/s]\n 18%|█▊ | 5/28 [00:01<00:06, 3.77it/s]\n 21%|██▏ | 6/28 [00:01<00:05, 3.73it/s]\n 25%|██▌ | 7/28 [00:01<00:05, 3.70it/s]\n 29%|██▊ | 8/28 [00:02<00:05, 3.68it/s]\n 32%|███▏ | 9/28 [00:02<00:05, 3.68it/s]\n 36%|███▌ | 10/28 [00:02<00:04, 3.68it/s]\n 39%|███▉ | 11/28 [00:02<00:04, 3.66it/s]\n 43%|████▎ | 12/28 [00:03<00:04, 3.66it/s]\n 46%|████▋ | 13/28 [00:03<00:04, 3.66it/s]\n 50%|█████ | 14/28 [00:03<00:03, 3.66it/s]\n 54%|█████▎ | 15/28 [00:04<00:03, 3.66it/s]\n 57%|█████▋ | 16/28 [00:04<00:03, 3.65it/s]\n 61%|██████ | 17/28 [00:04<00:03, 3.66it/s]\n 64%|██████▍ | 18/28 [00:04<00:02, 3.66it/s]\n 68%|██████▊ | 19/28 [00:05<00:02, 3.65it/s]\n 71%|███████▏ | 20/28 [00:05<00:02, 3.65it/s]\n 75%|███████▌ | 21/28 [00:05<00:01, 3.66it/s]\n 79%|███████▊ | 22/28 [00:05<00:01, 3.66it/s]\n 82%|████████▏ | 23/28 [00:06<00:01, 3.65it/s]\n 86%|████████▌ | 24/28 [00:06<00:01, 3.65it/s]\n 89%|████████▉ | 25/28 [00:06<00:00, 3.66it/s]\n 93%|█████████▎| 26/28 [00:07<00:00, 3.66it/s]\n 96%|█████████▋| 27/28 [00:07<00:00, 3.65it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.65it/s]\n100%|██████████| 28/28 [00:07<00:00, 3.69it/s]", "metrics": { "predict_time": 17.798243099, "total_time": 63.927104 }, "output": [ "https://replicate.delivery/yhqm/m1hiqJeAZdV1QafFjOqyKrx8xeajLqG0qWbHUw36zkX0meLNB/out-0.webp" ], "started_at": "2024-08-15T18:02:48.952861Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/bxywcdhcs1rm40chawt9knhyf8", "cancel": "https://api.replicate.com/v1/predictions/bxywcdhcs1rm40chawt9knhyf8/cancel" }, "version": "d1d8238f1f13d80aafc655c5998bcdcc12d04e3bddd227c5fc840fce6ae9a2ab" }
Want to make some of these yourself?
Run this model