vetkastar / comfy-flux
comfy with flux model,
- Public
- 173K runs
- Hardware: A100 (80GB)
- License
Prediction
vetkastar/comfy-flux:3c5d220c265c5c8fdcadfd3efbc256a688673fcc23a1d50745ca5c1e74ae2a7d
ID: kjc5bmtm89rj60chrv7ahv30q0 · Status: Succeeded · Source: Web · Hardware: A100 (80GB) · Total duration · Created
Input
- lora_urls
- output_format
- png
- workflow_json
- { "6": { "inputs": { "text": "flowers with magic waves and circles", "clip": [ "11", 0 ] }, "class_type": "CLIPTextEncode", "_meta": { "title": "CLIP Text Encode (Positive Prompt)" } }, "8": { "inputs": { "samples": [ "13", 0 ], "vae": [ "10", 0 ] }, "class_type": "VAEDecode", "_meta": { "title": "VAE Decode" } }, "9": { "inputs": { "filename_prefix": "ComfyUI", "images": [ "8", 0 ] }, "class_type": "SaveImage", "_meta": { "title": "Save Image" } }, "10": { "inputs": { "vae_name": "ae.safetensors" }, "class_type": "VAELoader", "_meta": { "title": "Load VAE" } }, "11": { "inputs": { "clip_name1": "t5xxl_fp16.safetensors", "clip_name2": "clip_l.safetensors", "type": "flux" }, "class_type": "DualCLIPLoader", "_meta": { "title": "DualCLIPLoader" } }, "12": { "inputs": { "unet_name": "flux1-dev.safetensors", "weight_dtype": "default" }, "class_type": "UNETLoader", "_meta": { "title": "Load Diffusion Model" } }, "13": { "inputs": { "noise": [ "25", 0 ], "guider": [ "22", 0 ], "sampler": [ "16", 0 ], "sigmas": [ "17", 0 ], "latent_image": [ "27", 0 ] }, "class_type": "SamplerCustomAdvanced", "_meta": { "title": "SamplerCustomAdvanced" } }, "16": { "inputs": { "sampler_name": "euler" }, "class_type": "KSamplerSelect", "_meta": { "title": "KSamplerSelect" } }, "17": { "inputs": { "scheduler": "simple", "steps": 20, "denoise": 1, "model": [ "30", 0 ] }, "class_type": "BasicScheduler", "_meta": { "title": "BasicScheduler" } }, "22": { "inputs": { "model": [ "30", 0 ], "conditioning": [ "26", 0 ] }, "class_type": "BasicGuider", "_meta": { "title": "BasicGuider" } }, "25": { "inputs": { "noise_seed": 219670278747233 }, "class_type": "RandomNoise", "_meta": { "title": "RandomNoise" } }, "26": { "inputs": { "guidance": 3.5, "conditioning": [ "6", 0 ] }, "class_type": "FluxGuidance", "_meta": { "title": "FluxGuidance" } }, "27": { "inputs": { "width": 1024, "height": 1024, "batch_size": 1 }, "class_type": "EmptySD3LatentImage", "_meta": { "title": "EmptySD3LatentImage" } }, 
"30": { "inputs": { "max_shift": 1.15, "base_shift": 0.5, "width": 1024, "height": 1024, "model": [ "12", 0 ] }, "class_type": "ModelSamplingFlux", "_meta": { "title": "ModelSamplingFlux" } } }
- output_quality
- 80
- randomise_seeds
- force_reset_cache
- return_temp_files
{ "lora_urls": "", "output_format": "png", "workflow_json": "{\n \"6\": {\n \"inputs\": {\n \"text\": \"flowers with magic waves and circles\",\n \"clip\": [\n \"11\",\n 0\n ]\n },\n \"class_type\": \"CLIPTextEncode\",\n \"_meta\": {\n \"title\": \"CLIP Text Encode (Positive Prompt)\"\n }\n },\n \"8\": {\n \"inputs\": {\n \"samples\": [\n \"13\",\n 0\n ],\n \"vae\": [\n \"10\",\n 0\n ]\n },\n \"class_type\": \"VAEDecode\",\n \"_meta\": {\n \"title\": \"VAE Decode\"\n }\n },\n \"9\": {\n \"inputs\": {\n \"filename_prefix\": \"ComfyUI\",\n \"images\": [\n \"8\",\n 0\n ]\n },\n \"class_type\": \"SaveImage\",\n \"_meta\": {\n \"title\": \"Save Image\"\n }\n },\n \"10\": {\n \"inputs\": {\n \"vae_name\": \"ae.safetensors\"\n },\n \"class_type\": \"VAELoader\",\n \"_meta\": {\n \"title\": \"Load VAE\"\n }\n },\n \"11\": {\n \"inputs\": {\n \"clip_name1\": \"t5xxl_fp16.safetensors\",\n \"clip_name2\": \"clip_l.safetensors\",\n \"type\": \"flux\"\n },\n \"class_type\": \"DualCLIPLoader\",\n \"_meta\": {\n \"title\": \"DualCLIPLoader\"\n }\n },\n \"12\": {\n \"inputs\": {\n \"unet_name\": \"flux1-dev.safetensors\",\n \"weight_dtype\": \"default\"\n },\n \"class_type\": \"UNETLoader\",\n \"_meta\": {\n \"title\": \"Load Diffusion Model\"\n }\n },\n \"13\": {\n \"inputs\": {\n \"noise\": [\n \"25\",\n 0\n ],\n \"guider\": [\n \"22\",\n 0\n ],\n \"sampler\": [\n \"16\",\n 0\n ],\n \"sigmas\": [\n \"17\",\n 0\n ],\n \"latent_image\": [\n \"27\",\n 0\n ]\n },\n \"class_type\": \"SamplerCustomAdvanced\",\n \"_meta\": {\n \"title\": \"SamplerCustomAdvanced\"\n }\n },\n \"16\": {\n \"inputs\": {\n \"sampler_name\": \"euler\"\n },\n \"class_type\": \"KSamplerSelect\",\n \"_meta\": {\n \"title\": \"KSamplerSelect\"\n }\n },\n \"17\": {\n \"inputs\": {\n \"scheduler\": \"simple\",\n \"steps\": 20,\n \"denoise\": 1,\n \"model\": [\n \"30\",\n 0\n ]\n },\n \"class_type\": \"BasicScheduler\",\n \"_meta\": {\n \"title\": \"BasicScheduler\"\n }\n },\n \"22\": {\n \"inputs\": {\n \"model\": 
[\n \"30\",\n 0\n ],\n \"conditioning\": [\n \"26\",\n 0\n ]\n },\n \"class_type\": \"BasicGuider\",\n \"_meta\": {\n \"title\": \"BasicGuider\"\n }\n },\n \"25\": {\n \"inputs\": {\n \"noise_seed\": 219670278747233\n },\n \"class_type\": \"RandomNoise\",\n \"_meta\": {\n \"title\": \"RandomNoise\"\n }\n },\n \"26\": {\n \"inputs\": {\n \"guidance\": 3.5,\n \"conditioning\": [\n \"6\",\n 0\n ]\n },\n \"class_type\": \"FluxGuidance\",\n \"_meta\": {\n \"title\": \"FluxGuidance\"\n }\n },\n \"27\": {\n \"inputs\": {\n \"width\": 1024,\n \"height\": 1024,\n \"batch_size\": 1\n },\n \"class_type\": \"EmptySD3LatentImage\",\n \"_meta\": {\n \"title\": \"EmptySD3LatentImage\"\n }\n },\n \"30\": {\n \"inputs\": {\n \"max_shift\": 1.15,\n \"base_shift\": 0.5,\n \"width\": 1024,\n \"height\": 1024,\n \"model\": [\n \"12\",\n 0\n ]\n },\n \"class_type\": \"ModelSamplingFlux\",\n \"_meta\": {\n \"title\": \"ModelSamplingFlux\"\n }\n }\n}", "output_quality": 80, "randomise_seeds": true, "force_reset_cache": false, "return_temp_files": false }
Install Replicate’s Node.js client library: npm install replicate
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run vetkastar/comfy-flux using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "vetkastar/comfy-flux:3c5d220c265c5c8fdcadfd3efbc256a688673fcc23a1d50745ca5c1e74ae2a7d", { input: { lora_urls: "", output_format: "png", workflow_json: "{\n \"6\": {\n \"inputs\": {\n \"text\": \"flowers with magic waves and circles\",\n \"clip\": [\n \"11\",\n 0\n ]\n },\n \"class_type\": \"CLIPTextEncode\",\n \"_meta\": {\n \"title\": \"CLIP Text Encode (Positive Prompt)\"\n }\n },\n \"8\": {\n \"inputs\": {\n \"samples\": [\n \"13\",\n 0\n ],\n \"vae\": [\n \"10\",\n 0\n ]\n },\n \"class_type\": \"VAEDecode\",\n \"_meta\": {\n \"title\": \"VAE Decode\"\n }\n },\n \"9\": {\n \"inputs\": {\n \"filename_prefix\": \"ComfyUI\",\n \"images\": [\n \"8\",\n 0\n ]\n },\n \"class_type\": \"SaveImage\",\n \"_meta\": {\n \"title\": \"Save Image\"\n }\n },\n \"10\": {\n \"inputs\": {\n \"vae_name\": \"ae.safetensors\"\n },\n \"class_type\": \"VAELoader\",\n \"_meta\": {\n \"title\": \"Load VAE\"\n }\n },\n \"11\": {\n \"inputs\": {\n \"clip_name1\": \"t5xxl_fp16.safetensors\",\n \"clip_name2\": \"clip_l.safetensors\",\n \"type\": \"flux\"\n },\n \"class_type\": \"DualCLIPLoader\",\n \"_meta\": {\n \"title\": \"DualCLIPLoader\"\n }\n },\n \"12\": {\n \"inputs\": {\n \"unet_name\": \"flux1-dev.safetensors\",\n \"weight_dtype\": \"default\"\n },\n \"class_type\": \"UNETLoader\",\n \"_meta\": {\n \"title\": \"Load Diffusion Model\"\n }\n },\n \"13\": {\n \"inputs\": {\n \"noise\": [\n \"25\",\n 0\n ],\n \"guider\": [\n \"22\",\n 0\n ],\n \"sampler\": [\n \"16\",\n 0\n ],\n \"sigmas\": [\n \"17\",\n 0\n ],\n \"latent_image\": [\n \"27\",\n 0\n ]\n },\n \"class_type\": \"SamplerCustomAdvanced\",\n \"_meta\": {\n \"title\": \"SamplerCustomAdvanced\"\n }\n },\n \"16\": {\n \"inputs\": {\n \"sampler_name\": \"euler\"\n },\n \"class_type\": \"KSamplerSelect\",\n \"_meta\": {\n \"title\": \"KSamplerSelect\"\n }\n },\n \"17\": {\n \"inputs\": {\n \"scheduler\": \"simple\",\n \"steps\": 20,\n \"denoise\": 1,\n \"model\": [\n \"30\",\n 0\n ]\n },\n 
\"class_type\": \"BasicScheduler\",\n \"_meta\": {\n \"title\": \"BasicScheduler\"\n }\n },\n \"22\": {\n \"inputs\": {\n \"model\": [\n \"30\",\n 0\n ],\n \"conditioning\": [\n \"26\",\n 0\n ]\n },\n \"class_type\": \"BasicGuider\",\n \"_meta\": {\n \"title\": \"BasicGuider\"\n }\n },\n \"25\": {\n \"inputs\": {\n \"noise_seed\": 219670278747233\n },\n \"class_type\": \"RandomNoise\",\n \"_meta\": {\n \"title\": \"RandomNoise\"\n }\n },\n \"26\": {\n \"inputs\": {\n \"guidance\": 3.5,\n \"conditioning\": [\n \"6\",\n 0\n ]\n },\n \"class_type\": \"FluxGuidance\",\n \"_meta\": {\n \"title\": \"FluxGuidance\"\n }\n },\n \"27\": {\n \"inputs\": {\n \"width\": 1024,\n \"height\": 1024,\n \"batch_size\": 1\n },\n \"class_type\": \"EmptySD3LatentImage\",\n \"_meta\": {\n \"title\": \"EmptySD3LatentImage\"\n }\n },\n \"30\": {\n \"inputs\": {\n \"max_shift\": 1.15,\n \"base_shift\": 0.5,\n \"width\": 1024,\n \"height\": 1024,\n \"model\": [\n \"12\",\n 0\n ]\n },\n \"class_type\": \"ModelSamplingFlux\",\n \"_meta\": {\n \"title\": \"ModelSamplingFlux\"\n }\n }\n}", output_quality: 80, randomise_seeds: true, force_reset_cache: false, return_temp_files: false } } ); // To access the file URL: console.log(output[0].url()); //=> "http://example.com" // To write the file to disk: fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Import the client: import replicate
Run vetkastar/comfy-flux using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "vetkastar/comfy-flux:3c5d220c265c5c8fdcadfd3efbc256a688673fcc23a1d50745ca5c1e74ae2a7d", input={ "lora_urls": "", "output_format": "png", "workflow_json": "{\n \"6\": {\n \"inputs\": {\n \"text\": \"flowers with magic waves and circles\",\n \"clip\": [\n \"11\",\n 0\n ]\n },\n \"class_type\": \"CLIPTextEncode\",\n \"_meta\": {\n \"title\": \"CLIP Text Encode (Positive Prompt)\"\n }\n },\n \"8\": {\n \"inputs\": {\n \"samples\": [\n \"13\",\n 0\n ],\n \"vae\": [\n \"10\",\n 0\n ]\n },\n \"class_type\": \"VAEDecode\",\n \"_meta\": {\n \"title\": \"VAE Decode\"\n }\n },\n \"9\": {\n \"inputs\": {\n \"filename_prefix\": \"ComfyUI\",\n \"images\": [\n \"8\",\n 0\n ]\n },\n \"class_type\": \"SaveImage\",\n \"_meta\": {\n \"title\": \"Save Image\"\n }\n },\n \"10\": {\n \"inputs\": {\n \"vae_name\": \"ae.safetensors\"\n },\n \"class_type\": \"VAELoader\",\n \"_meta\": {\n \"title\": \"Load VAE\"\n }\n },\n \"11\": {\n \"inputs\": {\n \"clip_name1\": \"t5xxl_fp16.safetensors\",\n \"clip_name2\": \"clip_l.safetensors\",\n \"type\": \"flux\"\n },\n \"class_type\": \"DualCLIPLoader\",\n \"_meta\": {\n \"title\": \"DualCLIPLoader\"\n }\n },\n \"12\": {\n \"inputs\": {\n \"unet_name\": \"flux1-dev.safetensors\",\n \"weight_dtype\": \"default\"\n },\n \"class_type\": \"UNETLoader\",\n \"_meta\": {\n \"title\": \"Load Diffusion Model\"\n }\n },\n \"13\": {\n \"inputs\": {\n \"noise\": [\n \"25\",\n 0\n ],\n \"guider\": [\n \"22\",\n 0\n ],\n \"sampler\": [\n \"16\",\n 0\n ],\n \"sigmas\": [\n \"17\",\n 0\n ],\n \"latent_image\": [\n \"27\",\n 0\n ]\n },\n \"class_type\": \"SamplerCustomAdvanced\",\n \"_meta\": {\n \"title\": \"SamplerCustomAdvanced\"\n }\n },\n \"16\": {\n \"inputs\": {\n \"sampler_name\": \"euler\"\n },\n \"class_type\": \"KSamplerSelect\",\n \"_meta\": {\n \"title\": \"KSamplerSelect\"\n }\n },\n \"17\": {\n \"inputs\": {\n \"scheduler\": \"simple\",\n \"steps\": 20,\n \"denoise\": 1,\n \"model\": [\n \"30\",\n 0\n ]\n },\n 
\"class_type\": \"BasicScheduler\",\n \"_meta\": {\n \"title\": \"BasicScheduler\"\n }\n },\n \"22\": {\n \"inputs\": {\n \"model\": [\n \"30\",\n 0\n ],\n \"conditioning\": [\n \"26\",\n 0\n ]\n },\n \"class_type\": \"BasicGuider\",\n \"_meta\": {\n \"title\": \"BasicGuider\"\n }\n },\n \"25\": {\n \"inputs\": {\n \"noise_seed\": 219670278747233\n },\n \"class_type\": \"RandomNoise\",\n \"_meta\": {\n \"title\": \"RandomNoise\"\n }\n },\n \"26\": {\n \"inputs\": {\n \"guidance\": 3.5,\n \"conditioning\": [\n \"6\",\n 0\n ]\n },\n \"class_type\": \"FluxGuidance\",\n \"_meta\": {\n \"title\": \"FluxGuidance\"\n }\n },\n \"27\": {\n \"inputs\": {\n \"width\": 1024,\n \"height\": 1024,\n \"batch_size\": 1\n },\n \"class_type\": \"EmptySD3LatentImage\",\n \"_meta\": {\n \"title\": \"EmptySD3LatentImage\"\n }\n },\n \"30\": {\n \"inputs\": {\n \"max_shift\": 1.15,\n \"base_shift\": 0.5,\n \"width\": 1024,\n \"height\": 1024,\n \"model\": [\n \"12\",\n 0\n ]\n },\n \"class_type\": \"ModelSamplingFlux\",\n \"_meta\": {\n \"title\": \"ModelSamplingFlux\"\n }\n }\n}", "output_quality": 80, "randomise_seeds": True, "force_reset_cache": False, "return_temp_files": False } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run vetkastar/comfy-flux using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "vetkastar/comfy-flux:3c5d220c265c5c8fdcadfd3efbc256a688673fcc23a1d50745ca5c1e74ae2a7d", "input": { "lora_urls": "", "output_format": "png", "workflow_json": "{\\n \\"6\\": {\\n \\"inputs\\": {\\n \\"text\\": \\"flowers with magic waves and circles\\",\\n \\"clip\\": [\\n \\"11\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"CLIPTextEncode\\",\\n \\"_meta\\": {\\n \\"title\\": \\"CLIP Text Encode (Positive Prompt)\\"\\n }\\n },\\n \\"8\\": {\\n \\"inputs\\": {\\n \\"samples\\": [\\n \\"13\\",\\n 0\\n ],\\n \\"vae\\": [\\n \\"10\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"VAEDecode\\",\\n \\"_meta\\": {\\n \\"title\\": \\"VAE Decode\\"\\n }\\n },\\n \\"9\\": {\\n \\"inputs\\": {\\n \\"filename_prefix\\": \\"ComfyUI\\",\\n \\"images\\": [\\n \\"8\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"SaveImage\\",\\n \\"_meta\\": {\\n \\"title\\": \\"Save Image\\"\\n }\\n },\\n \\"10\\": {\\n \\"inputs\\": {\\n \\"vae_name\\": \\"ae.safetensors\\"\\n },\\n \\"class_type\\": \\"VAELoader\\",\\n \\"_meta\\": {\\n \\"title\\": \\"Load VAE\\"\\n }\\n },\\n \\"11\\": {\\n \\"inputs\\": {\\n \\"clip_name1\\": \\"t5xxl_fp16.safetensors\\",\\n \\"clip_name2\\": \\"clip_l.safetensors\\",\\n \\"type\\": \\"flux\\"\\n },\\n \\"class_type\\": \\"DualCLIPLoader\\",\\n \\"_meta\\": {\\n \\"title\\": \\"DualCLIPLoader\\"\\n }\\n },\\n \\"12\\": {\\n \\"inputs\\": {\\n \\"unet_name\\": \\"flux1-dev.safetensors\\",\\n \\"weight_dtype\\": \\"default\\"\\n },\\n \\"class_type\\": \\"UNETLoader\\",\\n \\"_meta\\": {\\n \\"title\\": \\"Load Diffusion Model\\"\\n }\\n },\\n \\"13\\": {\\n \\"inputs\\": {\\n \\"noise\\": [\\n \\"25\\",\\n 0\\n ],\\n \\"guider\\": [\\n \\"22\\",\\n 0\\n ],\\n \\"sampler\\": [\\n \\"16\\",\\n 0\\n ],\\n \\"sigmas\\": [\\n \\"17\\",\\n 0\\n ],\\n \\"latent_image\\": [\\n \\"27\\",\\n 0\\n ]\\n },\\n \\"class_type\\": 
\\"SamplerCustomAdvanced\\",\\n \\"_meta\\": {\\n \\"title\\": \\"SamplerCustomAdvanced\\"\\n }\\n },\\n \\"16\\": {\\n \\"inputs\\": {\\n \\"sampler_name\\": \\"euler\\"\\n },\\n \\"class_type\\": \\"KSamplerSelect\\",\\n \\"_meta\\": {\\n \\"title\\": \\"KSamplerSelect\\"\\n }\\n },\\n \\"17\\": {\\n \\"inputs\\": {\\n \\"scheduler\\": \\"simple\\",\\n \\"steps\\": 20,\\n \\"denoise\\": 1,\\n \\"model\\": [\\n \\"30\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"BasicScheduler\\",\\n \\"_meta\\": {\\n \\"title\\": \\"BasicScheduler\\"\\n }\\n },\\n \\"22\\": {\\n \\"inputs\\": {\\n \\"model\\": [\\n \\"30\\",\\n 0\\n ],\\n \\"conditioning\\": [\\n \\"26\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"BasicGuider\\",\\n \\"_meta\\": {\\n \\"title\\": \\"BasicGuider\\"\\n }\\n },\\n \\"25\\": {\\n \\"inputs\\": {\\n \\"noise_seed\\": 219670278747233\\n },\\n \\"class_type\\": \\"RandomNoise\\",\\n \\"_meta\\": {\\n \\"title\\": \\"RandomNoise\\"\\n }\\n },\\n \\"26\\": {\\n \\"inputs\\": {\\n \\"guidance\\": 3.5,\\n \\"conditioning\\": [\\n \\"6\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"FluxGuidance\\",\\n \\"_meta\\": {\\n \\"title\\": \\"FluxGuidance\\"\\n }\\n },\\n \\"27\\": {\\n \\"inputs\\": {\\n \\"width\\": 1024,\\n \\"height\\": 1024,\\n \\"batch_size\\": 1\\n },\\n \\"class_type\\": \\"EmptySD3LatentImage\\",\\n \\"_meta\\": {\\n \\"title\\": \\"EmptySD3LatentImage\\"\\n }\\n },\\n \\"30\\": {\\n \\"inputs\\": {\\n \\"max_shift\\": 1.15,\\n \\"base_shift\\": 0.5,\\n \\"width\\": 1024,\\n \\"height\\": 1024,\\n \\"model\\": [\\n \\"12\\",\\n 0\\n ]\\n },\\n \\"class_type\\": \\"ModelSamplingFlux\\",\\n \\"_meta\\": {\\n \\"title\\": \\"ModelSamplingFlux\\"\\n }\\n }\\n}", "output_quality": 80, "randomise_seeds": true, "force_reset_cache": false, "return_temp_files": false } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-09-06T10:11:40.359567Z", "created_at": "2024-09-06T10:08:16.450000Z", "data_removed": false, "error": null, "id": "kjc5bmtm89rj60chrv7ahv30q0", "input": { "lora_urls": "", "output_format": "png", "workflow_json": "{\n \"6\": {\n \"inputs\": {\n \"text\": \"flowers with magic waves and circles\",\n \"clip\": [\n \"11\",\n 0\n ]\n },\n \"class_type\": \"CLIPTextEncode\",\n \"_meta\": {\n \"title\": \"CLIP Text Encode (Positive Prompt)\"\n }\n },\n \"8\": {\n \"inputs\": {\n \"samples\": [\n \"13\",\n 0\n ],\n \"vae\": [\n \"10\",\n 0\n ]\n },\n \"class_type\": \"VAEDecode\",\n \"_meta\": {\n \"title\": \"VAE Decode\"\n }\n },\n \"9\": {\n \"inputs\": {\n \"filename_prefix\": \"ComfyUI\",\n \"images\": [\n \"8\",\n 0\n ]\n },\n \"class_type\": \"SaveImage\",\n \"_meta\": {\n \"title\": \"Save Image\"\n }\n },\n \"10\": {\n \"inputs\": {\n \"vae_name\": \"ae.safetensors\"\n },\n \"class_type\": \"VAELoader\",\n \"_meta\": {\n \"title\": \"Load VAE\"\n }\n },\n \"11\": {\n \"inputs\": {\n \"clip_name1\": \"t5xxl_fp16.safetensors\",\n \"clip_name2\": \"clip_l.safetensors\",\n \"type\": \"flux\"\n },\n \"class_type\": \"DualCLIPLoader\",\n \"_meta\": {\n \"title\": \"DualCLIPLoader\"\n }\n },\n \"12\": {\n \"inputs\": {\n \"unet_name\": \"flux1-dev.safetensors\",\n \"weight_dtype\": \"default\"\n },\n \"class_type\": \"UNETLoader\",\n \"_meta\": {\n \"title\": \"Load Diffusion Model\"\n }\n },\n \"13\": {\n \"inputs\": {\n \"noise\": [\n \"25\",\n 0\n ],\n \"guider\": [\n \"22\",\n 0\n ],\n \"sampler\": [\n \"16\",\n 0\n ],\n \"sigmas\": [\n \"17\",\n 0\n ],\n \"latent_image\": [\n \"27\",\n 0\n ]\n },\n \"class_type\": \"SamplerCustomAdvanced\",\n \"_meta\": {\n \"title\": \"SamplerCustomAdvanced\"\n }\n },\n \"16\": {\n \"inputs\": {\n \"sampler_name\": \"euler\"\n },\n \"class_type\": \"KSamplerSelect\",\n \"_meta\": {\n \"title\": \"KSamplerSelect\"\n }\n },\n \"17\": {\n \"inputs\": {\n \"scheduler\": \"simple\",\n \"steps\": 20,\n 
\"denoise\": 1,\n \"model\": [\n \"30\",\n 0\n ]\n },\n \"class_type\": \"BasicScheduler\",\n \"_meta\": {\n \"title\": \"BasicScheduler\"\n }\n },\n \"22\": {\n \"inputs\": {\n \"model\": [\n \"30\",\n 0\n ],\n \"conditioning\": [\n \"26\",\n 0\n ]\n },\n \"class_type\": \"BasicGuider\",\n \"_meta\": {\n \"title\": \"BasicGuider\"\n }\n },\n \"25\": {\n \"inputs\": {\n \"noise_seed\": 219670278747233\n },\n \"class_type\": \"RandomNoise\",\n \"_meta\": {\n \"title\": \"RandomNoise\"\n }\n },\n \"26\": {\n \"inputs\": {\n \"guidance\": 3.5,\n \"conditioning\": [\n \"6\",\n 0\n ]\n },\n \"class_type\": \"FluxGuidance\",\n \"_meta\": {\n \"title\": \"FluxGuidance\"\n }\n },\n \"27\": {\n \"inputs\": {\n \"width\": 1024,\n \"height\": 1024,\n \"batch_size\": 1\n },\n \"class_type\": \"EmptySD3LatentImage\",\n \"_meta\": {\n \"title\": \"EmptySD3LatentImage\"\n }\n },\n \"30\": {\n \"inputs\": {\n \"max_shift\": 1.15,\n \"base_shift\": 0.5,\n \"width\": 1024,\n \"height\": 1024,\n \"model\": [\n \"12\",\n 0\n ]\n },\n \"class_type\": \"ModelSamplingFlux\",\n \"_meta\": {\n \"title\": \"ModelSamplingFlux\"\n }\n }\n}", "output_quality": 80, "randomise_seeds": true, "force_reset_cache": false, "return_temp_files": false }, "logs": "Checking inputs\n====================================\nChecking weights\n⏳ Downloading flux1-dev.safetensors to ComfyUI/models/diffusion_models\n✅ flux1-dev.safetensors downloaded to ComfyUI/models/diffusion_models in 12.05s, size: 22700.25MB\n⏳ Downloading clip_l.safetensors to ComfyUI/models/clip\n✅ clip_l.safetensors downloaded to ComfyUI/models/clip in 0.23s, size: 234.74MB\n⏳ Downloading t5xxl_fp16.safetensors to ComfyUI/models/clip\n✅ t5xxl_fp16.safetensors downloaded to ComfyUI/models/clip in 5.28s, size: 9334.41MB\n⏳ Downloading ae.safetensors to ComfyUI/models/vae\n✅ ae.safetensors downloaded to ComfyUI/models/vae in 0.30s, size: 319.77MB\n====================================\nRandomising noise_seed to 2599278736\nRunning 
workflow\ngot prompt\nExecuting node 10, title: Load VAE, class type: VAELoader\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nExecuting node 25, title: RandomNoise, class type: RandomNoise\nExecuting node 12, title: Load Diffusion Model, class type: UNETLoader\nmodel weight dtype torch.bfloat16, manual cast: None\nmodel_type FLUX\nExecuting node 30, title: ModelSamplingFlux, class type: ModelSamplingFlux\nExecuting node 11, title: DualCLIPLoader, class type: DualCLIPLoader\nclip missing: ['text_projection.weight']\nExecuting node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode\nRequested to load FluxClipModel_\nLoading 1 new model\nloaded completely 0.0 9319.23095703125 True\nExecuting node 26, title: FluxGuidance, class type: FluxGuidance\nExecuting node 22, title: BasicGuider, class type: BasicGuider\nExecuting node 16, title: KSamplerSelect, class type: KSamplerSelect\nExecuting node 17, title: BasicScheduler, class type: BasicScheduler\nExecuting node 27, title: EmptySD3LatentImage, class type: EmptySD3LatentImage\nRequested to load Flux\nLoading 1 new model\nExecuting node 13, title: SamplerCustomAdvanced, class type: SamplerCustomAdvanced\nloaded completely 0.0 22700.097778320312 True\n 0%| | 0/20 [00:00<?, ?it/s]\n 5%|▌ | 1/20 [00:00<00:06, 3.09it/s]\n 10%|█ | 2/20 [00:00<00:06, 2.64it/s]\n 15%|█▌ | 3/20 [00:01<00:06, 2.52it/s]\n 20%|██ | 4/20 [00:01<00:06, 2.46it/s]\n 25%|██▌ | 5/20 [00:01<00:06, 2.44it/s]\n 30%|███ | 6/20 [00:02<00:05, 2.42it/s]\n 35%|███▌ | 7/20 [00:02<00:05, 2.41it/s]\n 40%|████ | 8/20 [00:03<00:04, 2.40it/s]\n 45%|████▌ | 9/20 [00:03<00:04, 2.40it/s]\n 50%|█████ | 10/20 [00:04<00:04, 2.40it/s]\n 55%|█████▌ | 11/20 [00:04<00:03, 2.39it/s]\n 60%|██████ | 12/20 [00:04<00:03, 2.39it/s]\n 65%|██████▌ | 13/20 [00:05<00:02, 2.39it/s]\n 70%|███████ | 14/20 [00:05<00:02, 2.39it/s]\n 75%|███████▌ | 15/20 [00:06<00:02, 2.38it/s]\n 80%|████████ | 16/20 [00:06<00:01, 2.38it/s]\n 85%|████████▌ | 17/20 
[00:07<00:01, 2.38it/s]\n 90%|█████████ | 18/20 [00:07<00:00, 2.38it/s]\n 95%|█████████▌| 19/20 [00:07<00:00, 2.38it/s]\n100%|██████████| 20/20 [00:08<00:00, 2.38it/s]\n100%|██████████| 20/20 [00:08<00:00, 2.41it/s]\nRequested to load AutoencodingEngine\nLoading 1 new model\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nloaded completely 0.0 159.87335777282715 True\nExecuting node 9, title: Save Image, class type: SaveImage\nPrompt executed in 17.86 seconds\noutputs: {'9': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}}\n====================================\nComfyUI_00001_.png", "metrics": { "predict_time": 36.852174306, "total_time": 203.909567 }, "output": [ "https://replicate.delivery/yhqm/BjCtPGhaiioSLdd3wEvTeQ8JZqtGwcjfTTISw21d3HYbdIaTA/ComfyUI_00001_.png" ], "started_at": "2024-09-06T10:11:03.507393Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/kjc5bmtm89rj60chrv7ahv30q0", "cancel": "https://api.replicate.com/v1/predictions/kjc5bmtm89rj60chrv7ahv30q0/cancel" }, "version": "3c5d220c265c5c8fdcadfd3efbc256a688673fcc23a1d50745ca5c1e74ae2a7d" }
Generated inChecking inputs ==================================== Checking weights ⏳ Downloading flux1-dev.safetensors to ComfyUI/models/diffusion_models ✅ flux1-dev.safetensors downloaded to ComfyUI/models/diffusion_models in 12.05s, size: 22700.25MB ⏳ Downloading clip_l.safetensors to ComfyUI/models/clip ✅ clip_l.safetensors downloaded to ComfyUI/models/clip in 0.23s, size: 234.74MB ⏳ Downloading t5xxl_fp16.safetensors to ComfyUI/models/clip ✅ t5xxl_fp16.safetensors downloaded to ComfyUI/models/clip in 5.28s, size: 9334.41MB ⏳ Downloading ae.safetensors to ComfyUI/models/vae ✅ ae.safetensors downloaded to ComfyUI/models/vae in 0.30s, size: 319.77MB ==================================== Randomising noise_seed to 2599278736 Running workflow got prompt Executing node 10, title: Load VAE, class type: VAELoader Using pytorch attention in VAE Using pytorch attention in VAE Executing node 25, title: RandomNoise, class type: RandomNoise Executing node 12, title: Load Diffusion Model, class type: UNETLoader model weight dtype torch.bfloat16, manual cast: None model_type FLUX Executing node 30, title: ModelSamplingFlux, class type: ModelSamplingFlux Executing node 11, title: DualCLIPLoader, class type: DualCLIPLoader clip missing: ['text_projection.weight'] Executing node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode Requested to load FluxClipModel_ Loading 1 new model loaded completely 0.0 9319.23095703125 True Executing node 26, title: FluxGuidance, class type: FluxGuidance Executing node 22, title: BasicGuider, class type: BasicGuider Executing node 16, title: KSamplerSelect, class type: KSamplerSelect Executing node 17, title: BasicScheduler, class type: BasicScheduler Executing node 27, title: EmptySD3LatentImage, class type: EmptySD3LatentImage Requested to load Flux Loading 1 new model Executing node 13, title: SamplerCustomAdvanced, class type: SamplerCustomAdvanced loaded completely 0.0 22700.097778320312 True 0%| | 0/20 [00:00<?, ?it/s] 
5%|▌ | 1/20 [00:00<00:06, 3.09it/s] 10%|█ | 2/20 [00:00<00:06, 2.64it/s] 15%|█▌ | 3/20 [00:01<00:06, 2.52it/s] 20%|██ | 4/20 [00:01<00:06, 2.46it/s] 25%|██▌ | 5/20 [00:01<00:06, 2.44it/s] 30%|███ | 6/20 [00:02<00:05, 2.42it/s] 35%|███▌ | 7/20 [00:02<00:05, 2.41it/s] 40%|████ | 8/20 [00:03<00:04, 2.40it/s] 45%|████▌ | 9/20 [00:03<00:04, 2.40it/s] 50%|█████ | 10/20 [00:04<00:04, 2.40it/s] 55%|█████▌ | 11/20 [00:04<00:03, 2.39it/s] 60%|██████ | 12/20 [00:04<00:03, 2.39it/s] 65%|██████▌ | 13/20 [00:05<00:02, 2.39it/s] 70%|███████ | 14/20 [00:05<00:02, 2.39it/s] 75%|███████▌ | 15/20 [00:06<00:02, 2.38it/s] 80%|████████ | 16/20 [00:06<00:01, 2.38it/s] 85%|████████▌ | 17/20 [00:07<00:01, 2.38it/s] 90%|█████████ | 18/20 [00:07<00:00, 2.38it/s] 95%|█████████▌| 19/20 [00:07<00:00, 2.38it/s] 100%|██████████| 20/20 [00:08<00:00, 2.38it/s] 100%|██████████| 20/20 [00:08<00:00, 2.41it/s] Requested to load AutoencodingEngine Loading 1 new model Executing node 8, title: VAE Decode, class type: VAEDecode loaded completely 0.0 159.87335777282715 True Executing node 9, title: Save Image, class type: SaveImage Prompt executed in 17.86 seconds outputs: {'9': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}} ==================================== ComfyUI_00001_.png
Want to make some of these yourself?
Run this model