colinmcdonnell22 / emergence-shard
The Emergence Image Generation model for the Shard faction.
- Public
- 798 runs
-
H100
Prediction
colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df — ID: q85keycwd1rme0cm2jvbvtbh1m · Status: Succeeded · Source: Web · Hardware: H100 · Total duration · Created · Input
- model
- dev
- prompt
- a large shrd creature walking around t pole
- go_fast
- lora_scale
- 1
- megapixels
- 1
- num_outputs
- 1
- aspect_ratio
- 1:1
- output_format
- webp
- guidance_scale
- 3
- output_quality
- 80
- prompt_strength
- 0.8
- extra_lora_scale
- 1
- num_inference_steps
- 28
{ "model": "dev", "prompt": "a large shrd creature walking around t pole", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library: npm install replicate
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Run colinmcdonnell22/emergence-shard and save the first output image.
// Assumes `replicate` is an already-configured Replicate client instance.
import { writeFile } from "node:fs/promises";

const output = await replicate.run(
  "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df",
  {
    input: {
      model: "dev",
      prompt: "a large shrd creature walking around t pole",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28,
    },
  },
);

// Each output item is a file object; .url() returns its hosted URL.
console.log(output[0].url()); //=> "http://example.com"

// Write the file to disk. The original example called the callback-based
// fs.writeFile with no callback (a TypeError in modern Node) and never
// imported fs; the promise API awaited here is the correct form.
// NOTE(review): output_format above is "webp" — "my-image.webp" may be a
// more accurate filename than the original example's "my-image.png".
await writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Import the client: import replicate
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Input payload for colinmcdonnell22/emergence-shard; assumes the
# `replicate` client module has already been imported.
payload = {
    "model": "dev",
    "prompt": "a large shrd creature walking around t pole",
    "go_fast": False,
    "lora_scale": 1,
    "megapixels": "1",
    "num_outputs": 1,
    "aspect_ratio": "1:1",
    "output_format": "webp",
    "guidance_scale": 3,
    "output_quality": 80,
    "prompt_strength": 0.8,
    "extra_lora_scale": 1,
    "num_inference_steps": 28,
}

# Run the model and print the resulting output.
output = replicate.run(
    "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df",
    input=payload,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# Requires REPLICATE_API_TOKEN to be set in the environment.
# "Prefer: wait" keeps the connection open until the prediction completes,
# so the response contains the finished output instead of a pending record.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df", "input": { "model": "dev", "prompt": "a large shrd creature walking around t pole", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-12-29T23:18:29.168856Z", "created_at": "2024-12-29T23:18:19.496000Z", "data_removed": false, "error": null, "id": "q85keycwd1rme0cm2jvbvtbh1m", "input": { "model": "dev", "prompt": "a large shrd creature walking around t pole", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "2024-12-29 23:18:19.605 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 23:18:19.606 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 93%|█████████▎| 283/304 [00:00<00:00, 2825.66it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2692.53it/s]\n2024-12-29 23:18:19.719 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.11s\nfree=28745962782720\nDownloading weights\n2024-12-29T23:18:19Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp4js1rvuz/weights url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar\n2024-12-29T23:18:22Z | INFO | [ Complete ] dest=/tmp/tmp4js1rvuz/weights size=\"194 MB\" total_elapsed=3.139s url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar\nDownloaded weights in 3.16s\n2024-12-29 23:18:22.884 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/5d8f5ed1fc490125\n2024-12-29 23:18:22.962 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-29 23:18:22.962 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 23:18:22.962 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 96%|█████████▌| 292/304 [00:00<00:00, 2891.75it/s]\nApplying LoRA: 
100%|██████████| 304/304 [00:00<00:00, 2862.92it/s]\n2024-12-29 23:18:23.069 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s\nUsing seed: 42273\n0it [00:00, ?it/s]\n1it [00:00, 8.33it/s]\n2it [00:00, 5.85it/s]\n3it [00:00, 5.33it/s]\n4it [00:00, 5.13it/s]\n5it [00:00, 5.01it/s]\n6it [00:01, 4.92it/s]\n7it [00:01, 4.88it/s]\n8it [00:01, 4.87it/s]\n9it [00:01, 4.86it/s]\n10it [00:01, 4.85it/s]\n11it [00:02, 4.84it/s]\n12it [00:02, 4.83it/s]\n13it [00:02, 4.83it/s]\n14it [00:02, 4.84it/s]\n15it [00:03, 4.84it/s]\n16it [00:03, 4.83it/s]\n17it [00:03, 4.83it/s]\n18it [00:03, 4.82it/s]\n19it [00:03, 4.82it/s]\n20it [00:04, 4.83it/s]\n21it [00:04, 4.83it/s]\n22it [00:04, 4.83it/s]\n23it [00:04, 4.83it/s]\n24it [00:04, 4.83it/s]\n25it [00:05, 4.82it/s]\n26it [00:05, 4.82it/s]\n27it [00:05, 4.82it/s]\n28it [00:05, 4.81it/s]\n28it [00:05, 4.90it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 9.562059261, "total_time": 9.672856 }, "output": [ "https://replicate.delivery/xezq/z1UkZ5VSB86YEZsi997T5bL9FLD9UuOvtj0eTECx0jziV8fTA/out-0.webp" ], "started_at": "2024-12-29T23:18:19.606797Z", "status": "succeeded", "urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-ryemkwse6o7xecsksnfandcojzuw2kdjmep7h6reynnerrpknzxq", "get": "https://api.replicate.com/v1/predictions/q85keycwd1rme0cm2jvbvtbh1m", "cancel": "https://api.replicate.com/v1/predictions/q85keycwd1rme0cm2jvbvtbh1m/cancel" }, "version": "1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df" }
Generated in2024-12-29 23:18:19.605 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys 2024-12-29 23:18:19.606 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s] Applying LoRA: 93%|█████████▎| 283/304 [00:00<00:00, 2825.66it/s] Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2692.53it/s] 2024-12-29 23:18:19.719 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.11s free=28745962782720 Downloading weights 2024-12-29T23:18:19Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp4js1rvuz/weights url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar 2024-12-29T23:18:22Z | INFO | [ Complete ] dest=/tmp/tmp4js1rvuz/weights size="194 MB" total_elapsed=3.139s url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar Downloaded weights in 3.16s 2024-12-29 23:18:22.884 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/5d8f5ed1fc490125 2024-12-29 23:18:22.962 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded 2024-12-29 23:18:22.962 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys 2024-12-29 23:18:22.962 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s] Applying LoRA: 96%|█████████▌| 292/304 [00:00<00:00, 2891.75it/s] Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2862.92it/s] 2024-12-29 23:18:23.069 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s Using seed: 42273 0it [00:00, ?it/s] 1it [00:00, 8.33it/s] 2it [00:00, 5.85it/s] 3it [00:00, 5.33it/s] 4it [00:00, 5.13it/s] 5it [00:00, 5.01it/s] 6it [00:01, 4.92it/s] 7it [00:01, 4.88it/s] 8it [00:01, 4.87it/s] 9it [00:01, 4.86it/s] 10it [00:01, 4.85it/s] 11it [00:02, 4.84it/s] 12it [00:02, 4.83it/s] 13it [00:02, 4.83it/s] 14it [00:02, 4.84it/s] 15it [00:03, 
4.84it/s] 16it [00:03, 4.83it/s] 17it [00:03, 4.83it/s] 18it [00:03, 4.82it/s] 19it [00:03, 4.82it/s] 20it [00:04, 4.83it/s] 21it [00:04, 4.83it/s] 22it [00:04, 4.83it/s] 23it [00:04, 4.83it/s] 24it [00:04, 4.83it/s] 25it [00:05, 4.82it/s] 26it [00:05, 4.82it/s] 27it [00:05, 4.82it/s] 28it [00:05, 4.81it/s] 28it [00:05, 4.90it/s] Total safe images: 1 out of 1
Prediction
colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df — ID: k21txghvvnrmc0cm2jxrqnsp6g · Status: Succeeded · Source: Web · Hardware: H100 · Total duration · Created · Input
- model
- dev
- prompt
- shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration
- go_fast
- lora_scale
- 1
- megapixels
- 1
- num_outputs
- 1
- aspect_ratio
- 1:1
- output_format
- webp
- guidance_scale
- 3
- output_quality
- 80
- prompt_strength
- 0.8
- extra_lora_scale
- 1
- num_inference_steps
- 28
{ "model": "dev", "prompt": "shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library: npm install replicate
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Run colinmcdonnell22/emergence-shard and save the first output image.
// Assumes `replicate` is an already-configured Replicate client instance.
import { writeFile } from "node:fs/promises";

const output = await replicate.run(
  "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df",
  {
    input: {
      model: "dev",
      prompt: "shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28,
    },
  },
);

// Each output item is a file object; .url() returns its hosted URL.
console.log(output[0].url()); //=> "http://example.com"

// Write the file to disk. The original example called the callback-based
// fs.writeFile with no callback (a TypeError in modern Node) and never
// imported fs; the promise API awaited here is the correct form.
// NOTE(review): output_format above is "webp" — "my-image.webp" may be a
// more accurate filename than the original example's "my-image.png".
await writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Import the client: import replicate
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Input payload for colinmcdonnell22/emergence-shard; assumes the
# `replicate` client module has already been imported.
payload = {
    "model": "dev",
    "prompt": "shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration",
    "go_fast": False,
    "lora_scale": 1,
    "megapixels": "1",
    "num_outputs": 1,
    "aspect_ratio": "1:1",
    "output_format": "webp",
    "guidance_scale": 3,
    "output_quality": 80,
    "prompt_strength": 0.8,
    "extra_lora_scale": 1,
    "num_inference_steps": 28,
}

# Run the model and print the resulting output.
output = replicate.run(
    "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df",
    input=payload,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run colinmcdonnell22/emergence-shard using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# Requires REPLICATE_API_TOKEN to be set in the environment.
# "Prefer: wait" keeps the connection open until the prediction completes,
# so the response contains the finished output instead of a pending record.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "colinmcdonnell22/emergence-shard:1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df", "input": { "model": "dev", "prompt": "shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-12-29T23:23:38.650648Z", "created_at": "2024-12-29T23:23:22.461000Z", "data_removed": false, "error": null, "id": "k21txghvvnrmc0cm2jxrqnsp6g", "input": { "model": "dev", "prompt": "shrd dark sage wanderer holding ancient glowing lantern, flowing emerald and black robes, iridescent tentacles, mystical fantasy illustration", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "2024-12-29 23:23:29.407 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 23:23:29.407 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 93%|█████████▎| 284/304 [00:00<00:00, 2827.58it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2740.59it/s]\n2024-12-29 23:23:29.519 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 23:23:29.519 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 57%|█████▋ | 174/304 [00:00<00:00, 1738.85it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 1872.96it/s]\n2024-12-29 23:23:29.682 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.27s\nfree=28952881082368\nDownloading weights\n2024-12-29T23:23:29Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpk8d3nglo/weights url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar\n2024-12-29T23:23:32Z | INFO | [ Complete ] dest=/tmp/tmpk8d3nglo/weights size=\"194 MB\" total_elapsed=2.617s url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar\nDownloaded weights in 2.64s\n2024-12-29 23:23:32.327 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA 
weights for /src/weights-cache/5d8f5ed1fc490125\n2024-12-29 23:23:32.401 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-29 23:23:32.401 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 23:23:32.401 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 98%|█████████▊| 298/304 [00:00<00:00, 2975.65it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2933.14it/s]\n2024-12-29 23:23:32.505 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s\nUsing seed: 62141\n0it [00:00, ?it/s]\n1it [00:00, 8.36it/s]\n2it [00:00, 5.85it/s]\n3it [00:00, 5.33it/s]\n4it [00:00, 5.13it/s]\n5it [00:00, 5.00it/s]\n6it [00:01, 4.90it/s]\n7it [00:01, 4.87it/s]\n8it [00:01, 4.85it/s]\n9it [00:01, 4.84it/s]\n10it [00:01, 4.83it/s]\n11it [00:02, 4.81it/s]\n12it [00:02, 4.81it/s]\n13it [00:02, 4.81it/s]\n14it [00:02, 4.80it/s]\n15it [00:03, 4.80it/s]\n16it [00:03, 4.79it/s]\n17it [00:03, 4.78it/s]\n18it [00:03, 4.79it/s]\n19it [00:03, 4.80it/s]\n20it [00:04, 4.80it/s]\n21it [00:04, 4.79it/s]\n22it [00:04, 4.80it/s]\n23it [00:04, 4.79it/s]\n24it [00:04, 4.79it/s]\n25it [00:05, 4.80it/s]\n26it [00:05, 4.78it/s]\n27it [00:05, 4.79it/s]\n28it [00:05, 4.79it/s]\n28it [00:05, 4.87it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 9.242754967, "total_time": 16.189648 }, "output": [ "https://replicate.delivery/xezq/NhI3eUBcdpVKIql2eF4qd1uILuPEMeb8fYzo9OFQIdvqfFffJA/out-0.webp" ], "started_at": "2024-12-29T23:23:29.407893Z", "status": "succeeded", "urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-jk7ingbprmemmec3y76f5w4cxnxxw6wgkjamyu2rguqva2k7mkoq", "get": "https://api.replicate.com/v1/predictions/k21txghvvnrmc0cm2jxrqnsp6g", "cancel": "https://api.replicate.com/v1/predictions/k21txghvvnrmc0cm2jxrqnsp6g/cancel" }, "version": "1d0682383deea73419e29f8d3446e1ea328e13cd1796d2cc4b5a262cff2688df" }
Generated in2024-12-29 23:23:29.407 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys 2024-12-29 23:23:29.407 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s] Applying LoRA: 93%|█████████▎| 284/304 [00:00<00:00, 2827.58it/s] Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2740.59it/s] 2024-12-29 23:23:29.519 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys 2024-12-29 23:23:29.519 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s] Applying LoRA: 57%|█████▋ | 174/304 [00:00<00:00, 1738.85it/s] Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 1872.96it/s] 2024-12-29 23:23:29.682 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.27s free=28952881082368 Downloading weights 2024-12-29T23:23:29Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmpk8d3nglo/weights url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar 2024-12-29T23:23:32Z | INFO | [ Complete ] dest=/tmp/tmpk8d3nglo/weights size="194 MB" total_elapsed=2.617s url=https://replicate.delivery/xezq/3Nez9CcDXBwgIq3do3zAIF4IRtma4kigidjZNOctf1mGG3fnA/trained_model.tar Downloaded weights in 2.64s 2024-12-29 23:23:32.327 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/5d8f5ed1fc490125 2024-12-29 23:23:32.401 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded 2024-12-29 23:23:32.401 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys 2024-12-29 23:23:32.401 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s] Applying LoRA: 98%|█████████▊| 298/304 [00:00<00:00, 2975.65it/s] Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2933.14it/s] 2024-12-29 23:23:32.505 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s Using 
seed: 62141 0it [00:00, ?it/s] 1it [00:00, 8.36it/s] 2it [00:00, 5.85it/s] 3it [00:00, 5.33it/s] 4it [00:00, 5.13it/s] 5it [00:00, 5.00it/s] 6it [00:01, 4.90it/s] 7it [00:01, 4.87it/s] 8it [00:01, 4.85it/s] 9it [00:01, 4.84it/s] 10it [00:01, 4.83it/s] 11it [00:02, 4.81it/s] 12it [00:02, 4.81it/s] 13it [00:02, 4.81it/s] 14it [00:02, 4.80it/s] 15it [00:03, 4.80it/s] 16it [00:03, 4.79it/s] 17it [00:03, 4.78it/s] 18it [00:03, 4.79it/s] 19it [00:03, 4.80it/s] 20it [00:04, 4.80it/s] 21it [00:04, 4.79it/s] 22it [00:04, 4.80it/s] 23it [00:04, 4.79it/s] 24it [00:04, 4.79it/s] 25it [00:05, 4.80it/s] 26it [00:05, 4.78it/s] 27it [00:05, 4.79it/s] 28it [00:05, 4.79it/s] 28it [00:05, 4.87it/s] Total safe images: 1 out of 1
Want to make some of these yourself?
Run this model