chakify / flux-dev-lora-jambox
- Public
- 28 runs
H100
Prediction
chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9
ID: j6br99xp91rmc0ckvzyr157md0
Status: Succeeded
Source: Web
Hardware: H100
Total duration: 9.3 seconds
Created: 2024-12-19T17:36:06Z
Input
- model: dev
- prompt: Red JAMBOX on the coffee table
- go_fast: false
- lora_scale: 1
- megapixels: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3
- output_quality: 80
- prompt_strength: 0.8
- extra_lora_scale: 1
- num_inference_steps: 28
{ "model": "dev", "prompt": "Red JAMBOX on the coffee table", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
import fs from "node:fs/promises";

const output = await replicate.run(
  "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
  {
    input: {
      model: "dev",
      prompt: "Red JAMBOX on the coffee table",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the model outputs a webp image here):
await fs.writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    input={
        "model": "dev",
        "prompt": "Red JAMBOX on the coffee table",
        "go_fast": False,
        "lora_scale": 1,
        "megapixels": "1",
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "output_quality": 80,
        "prompt_strength": 0.8,
        "extra_lora_scale": 1,
        "num_inference_steps": 28
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
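The list returned by replicate.run contains one entry per generated image (num_outputs is 1 here). If you want the file on disk rather than just its URL, something like the following stdlib-only sketch works; it is not from the official docs. Depending on your replicate client version each output item is either a plain URL string or a FileOutput object whose url attribute is a string, and the out-0.webp filename is just an illustration matching output_format "webp".

import urllib.request

import replicate

output = replicate.run(
    "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    input={"prompt": "Red JAMBOX on the coffee table", "num_inference_steps": 28},
)

for i, item in enumerate(output):
    # Newer clients return FileOutput objects exposing a .url string; older
    # ones return plain URL strings. Either way we end up with a URL.
    url = getattr(item, "url", item)
    urllib.request.urlretrieve(url, f"out-{i}.webp")
    print(f"saved out-{i}.webp from {url}")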
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    "input": {
      "model": "dev",
      "prompt": "Red JAMBOX on the coffee table",
      "go_fast": false,
      "lora_scale": 1,
      "megapixels": "1",
      "num_outputs": 1,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
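With the "Prefer: wait" header the API holds the connection open while the prediction runs (for a limited time). If you would rather create the prediction and poll for the result, the create response includes a urls.get endpoint you can query until the status reaches a terminal value, as in the prediction records shown below. A rough stdlib-only Python sketch of that flow, mirroring the request body from the curl example above (error handling and timeouts omitted):

import json
import os
import time
import urllib.request

API = "https://api.replicate.com/v1/predictions"
HEADERS = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

body = json.dumps({
    "version": "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    "input": {"prompt": "Red JAMBOX on the coffee table", "num_inference_steps": 28},
}).encode()

# Create the prediction.
req = urllib.request.Request(API, data=body, headers=HEADERS, method="POST")
prediction = json.load(urllib.request.urlopen(req))

# Poll the prediction's "get" URL until it finishes.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    get_req = urllib.request.Request(prediction["urls"]["get"], headers=HEADERS)
    prediction = json.load(urllib.request.urlopen(get_req))

print(prediction["status"], prediction.get("output"))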
Output
{ "completed_at": "2024-12-19T17:36:16.023522Z", "created_at": "2024-12-19T17:36:06.728000Z", "data_removed": false, "error": null, "id": "j6br99xp91rmc0ckvzyr157md0", "input": { "model": "dev", "prompt": "Red JAMBOX on the coffee table", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "2024-12-19 17:36:07.119 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-19 17:36:07.119 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2761.17it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2703.83it/s]\n2024-12-19 17:36:07.232 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.11s\nfree=28937297399808\nDownloading weights\n2024-12-19T17:36:07Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpxfzw_6tw/weights url=https://replicate.delivery/xezq/d5bqeLHzgyTgB6ZNTgeRiQz2354rWndrtnnSaf9XszVNDB5nA/trained_model.tar\n2024-12-19T17:36:09Z | INFO | [ Complete ] dest=/tmp/tmpxfzw_6tw/weights size=\"172 MB\" total_elapsed=2.383s url=https://replicate.delivery/xezq/d5bqeLHzgyTgB6ZNTgeRiQz2354rWndrtnnSaf9XszVNDB5nA/trained_model.tar\nDownloaded weights in 2.41s\n2024-12-19 17:36:09.642 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/31bce586c6a24140\n2024-12-19 17:36:09.713 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-19 17:36:09.713 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-19 17:36:09.714 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2762.72it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2705.08it/s]\n2024-12-19 17:36:09.826 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s\nUsing seed: 501\n0it [00:00, ?it/s]\n1it [00:00, 8.32it/s]\n2it [00:00, 5.78it/s]\n3it [00:00, 5.27it/s]\n4it [00:00, 5.06it/s]\n5it [00:00, 4.92it/s]\n6it [00:01, 4.82it/s]\n7it [00:01, 4.81it/s]\n8it [00:01, 4.80it/s]\n9it [00:01, 4.78it/s]\n10it [00:02, 4.74it/s]\n11it [00:02, 4.72it/s]\n12it [00:02, 4.73it/s]\n13it [00:02, 4.74it/s]\n14it [00:02, 4.74it/s]\n15it [00:03, 4.74it/s]\n16it [00:03, 4.72it/s]\n17it [00:03, 4.73it/s]\n18it [00:03, 4.73it/s]\n19it [00:03, 4.72it/s]\n20it [00:04, 4.73it/s]\n21it [00:04, 4.72it/s]\n22it [00:04, 4.72it/s]\n23it [00:04, 4.73it/s]\n24it [00:04, 4.73it/s]\n25it [00:05, 4.74it/s]\n26it [00:05, 4.73it/s]\n27it [00:05, 4.74it/s]\n28it [00:05, 4.74it/s]\n28it [00:05, 4.80it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 8.903158003, "total_time": 9.295522 }, "output": [ "https://replicate.delivery/xezq/4EfJXA776URyYaefRU5NmPsc5KznSjs6YHJRoSCZL8UhcB5nA/out-0.webp" ], "started_at": "2024-12-19T17:36:07.120364Z", "status": "succeeded", "urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-23m3itcumkdsit7llv2ldlzyvevxvt6avllma65fmzetr7awksba", "get": "https://api.replicate.com/v1/predictions/j6br99xp91rmc0ckvzyr157md0", "cancel": "https://api.replicate.com/v1/predictions/j6br99xp91rmc0ckvzyr157md0/cancel" }, "version": "9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9" }
Generated in 8.9 seconds
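The Output block above is a single prediction record in plain JSON, so the interesting fields (status, timing, output URLs) are easy to pull out once you have it saved. A minimal sketch, assuming the record was saved locally as prediction.json (a filename chosen here for illustration):

import json

# Load a saved prediction record like the one shown above.
with open("prediction.json") as f:
    prediction = json.load(f)

print("status:", prediction["status"])
print("predict time (s):", prediction["metrics"]["predict_time"])
for url in prediction["output"]:
    print("output image:", url)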
Prediction
chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9
ID: c3m00t7myxrmc0ckw01tn99mh4
Status: Succeeded
Source: Web
Hardware: H100
Total duration: 6.1 seconds
Created: 2024-12-19T17:42:55Z
Input
- model: dev
- prompt: A family at the beach. Red JAMBOX in the middle of the beach towel.
- go_fast: false
- lora_scale: 1
- megapixels: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3
- output_quality: 80
- prompt_strength: 0.8
- extra_lora_scale: 1
- num_inference_steps: 28
{ "model": "dev", "prompt": "A family at the beach. Red JAMBOX in the middle of the beach towel.", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
import fs from "node:fs/promises";

const output = await replicate.run(
  "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
  {
    input: {
      model: "dev",
      prompt: "A family at the beach. Red JAMBOX in the middle of the beach towel.",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the model outputs a webp image here):
await fs.writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    input={
        "model": "dev",
        "prompt": "A family at the beach. Red JAMBOX in the middle of the beach towel.",
        "go_fast": False,
        "lora_scale": 1,
        "megapixels": "1",
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "output_quality": 80,
        "prompt_strength": 0.8,
        "extra_lora_scale": 1,
        "num_inference_steps": 28
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    "input": {
      "model": "dev",
      "prompt": "A family at the beach. Red JAMBOX in the middle of the beach towel.",
      "go_fast": false,
      "lora_scale": 1,
      "megapixels": "1",
      "num_outputs": 1,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-12-19T17:43:02.078764Z", "created_at": "2024-12-19T17:42:55.991000Z", "data_removed": false, "error": null, "id": "c3m00t7myxrmc0ckw01tn99mh4", "input": { "model": "dev", "prompt": "A family at the beach. Red JAMBOX in the middle of the beach towel.", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "Lora https://replicate.delivery/xezq/d5bqeLHzgyTgB6ZNTgeRiQz2354rWndrtnnSaf9XszVNDB5nA/trained_model.tar already loaded\nUsing seed: 57833\n0it [00:00, ?it/s]\n1it [00:00, 8.37it/s]\n2it [00:00, 5.84it/s]\n3it [00:00, 5.33it/s]\n4it [00:00, 5.11it/s]\n5it [00:00, 4.98it/s]\n6it [00:01, 4.89it/s]\n7it [00:01, 4.86it/s]\n8it [00:01, 4.84it/s]\n9it [00:01, 4.83it/s]\n10it [00:01, 4.82it/s]\n11it [00:02, 4.79it/s]\n12it [00:02, 4.79it/s]\n13it [00:02, 4.80it/s]\n14it [00:02, 4.80it/s]\n15it [00:03, 4.78it/s]\n16it [00:03, 4.78it/s]\n17it [00:03, 4.79it/s]\n18it [00:03, 4.80it/s]\n19it [00:03, 4.80it/s]\n20it [00:04, 4.79it/s]\n21it [00:04, 4.78it/s]\n22it [00:04, 4.78it/s]\n23it [00:04, 4.78it/s]\n24it [00:04, 4.79it/s]\n25it [00:05, 4.79it/s]\n26it [00:05, 4.79it/s]\n27it [00:05, 4.78it/s]\n28it [00:05, 4.78it/s]\n28it [00:05, 4.86it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 6.079322364, "total_time": 6.087764 }, "output": [ "https://replicate.delivery/xezq/b4I7O8bL7MKKFtZzRfLozt2kZuXYoDlqh5P1RNuvloSTaQeTA/out-0.webp" ], "started_at": "2024-12-19T17:42:55.999441Z", "status": "succeeded", "urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-e27ui4cyc5e677yvfp6nxlau25u6dlrf7yqh73oqrxummtqjlqzq", "get": "https://api.replicate.com/v1/predictions/c3m00t7myxrmc0ckw01tn99mh4", "cancel": "https://api.replicate.com/v1/predictions/c3m00t7myxrmc0ckw01tn99mh4/cancel" }, "version": "9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9" }
Generated in 6.1 seconds
Prediction
chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9
ID: b27j1dy82nrm80ckw03vq0gcmc
Status: Succeeded
Source: Web
Hardware: H100
Total duration: 8.7 seconds
Created: 2024-12-19T17:47:06Z
Input
- model: dev
- prompt: Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.
- go_fast: false
- lora_scale: 1
- megapixels: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3
- output_quality: 80
- prompt_strength: 0.8
- extra_lora_scale: 1
- num_inference_steps: 28
{ "model": "dev", "prompt": "Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
import fs from "node:fs/promises";

const output = await replicate.run(
  "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
  {
    input: {
      model: "dev",
      prompt: "Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the model outputs a webp image here):
await fs.writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    input={
        "model": "dev",
        "prompt": "Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.",
        "go_fast": False,
        "lora_scale": 1,
        "megapixels": "1",
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "output_quality": 80,
        "prompt_strength": 0.8,
        "extra_lora_scale": 1,
        "num_inference_steps": 28
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run chakify/flux-dev-lora-jambox using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "chakify/flux-dev-lora-jambox:9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9",
    "input": {
      "model": "dev",
      "prompt": "Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.",
      "go_fast": false,
      "lora_scale": 1,
      "megapixels": "1",
      "num_outputs": 1,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-12-19T17:47:15.338578Z", "created_at": "2024-12-19T17:47:06.645000Z", "data_removed": false, "error": null, "id": "b27j1dy82nrm80ckw03vq0gcmc", "input": { "model": "dev", "prompt": "Red JAMBOX in the middle of the bookselves, in an office with black bookshelves.", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "2024-12-19 17:47:06.725 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-19 17:47:06.725 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 95%|█████████▌| 289/304 [00:00<00:00, 2887.86it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2785.06it/s]\n2024-12-19 17:47:06.835 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-19 17:47:06.835 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 55%|█████▌ | 168/304 [00:00<00:00, 1677.44it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 1751.35it/s]\n2024-12-19 17:47:07.009 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.28s\nfree=28661308735488\nDownloading weights\n2024-12-19T17:47:07Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpey9oot4v/weights url=https://replicate.delivery/xezq/d5bqeLHzgyTgB6ZNTgeRiQz2354rWndrtnnSaf9XszVNDB5nA/trained_model.tar\n2024-12-19T17:47:09Z | INFO | [ Complete ] dest=/tmp/tmpey9oot4v/weights size=\"172 MB\" total_elapsed=2.013s url=https://replicate.delivery/xezq/d5bqeLHzgyTgB6ZNTgeRiQz2354rWndrtnnSaf9XszVNDB5nA/trained_model.tar\nDownloaded weights in 2.04s\n2024-12-19 17:47:09.050 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/31bce586c6a24140\n2024-12-19 17:47:09.123 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-19 17:47:09.123 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-19 17:47:09.123 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 90%|████████▉ | 273/304 [00:00<00:00, 2725.71it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2658.18it/s]\n2024-12-19 17:47:09.238 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.19s\nUsing seed: 48234\n0it [00:00, ?it/s]\n1it [00:00, 8.35it/s]\n2it [00:00, 5.85it/s]\n3it [00:00, 5.33it/s]\n4it [00:00, 5.12it/s]\n5it [00:00, 5.01it/s]\n6it [00:01, 4.93it/s]\n7it [00:01, 4.89it/s]\n8it [00:01, 4.86it/s]\n9it [00:01, 4.85it/s]\n10it [00:01, 4.84it/s]\n11it [00:02, 4.83it/s]\n12it [00:02, 4.82it/s]\n13it [00:02, 4.82it/s]\n14it [00:02, 4.82it/s]\n15it [00:03, 4.81it/s]\n16it [00:03, 4.80it/s]\n17it [00:03, 4.81it/s]\n18it [00:03, 4.80it/s]\n19it [00:03, 4.80it/s]\n20it [00:04, 4.81it/s]\n21it [00:04, 4.81it/s]\n22it [00:04, 4.81it/s]\n23it [00:04, 4.81it/s]\n24it [00:04, 4.81it/s]\n25it [00:05, 4.81it/s]\n26it [00:05, 4.81it/s]\n27it [00:05, 4.81it/s]\n28it [00:05, 4.81it/s]\n28it [00:05, 4.88it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 8.612332211, "total_time": 8.693578 }, "output": [ "https://replicate.delivery/xezq/f2mqe7esIoBlLpRRnfzeCua4Vp1wfPeUG4ln3JSAr0QpRcQeTA/out-0.webp" ], "started_at": "2024-12-19T17:47:06.726245Z", "status": "succeeded", 
"urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-u7srefqh7whosvvzdy6yho3nsx5dhqqpnt6q3m4txluucb7v3fha", "get": "https://api.replicate.com/v1/predictions/b27j1dy82nrm80ckw03vq0gcmc", "cancel": "https://api.replicate.com/v1/predictions/b27j1dy82nrm80ckw03vq0gcmc/cancel" }, "version": "9dfdeb3a61e1f7f2e056da4fbc9d3ebe7af637c17c0f06acfbfdcd3dec5be9e9" }
Generated in 8.6 seconds
Want to make some of these yourself?
Run this model