growthmkt / prompt
- Public
- 546 runs
-
A100 (80GB)
Prediction
growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0 — ID: wh2kouzbsopgy2uigee7ilvwfi · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created · Input
- prompt
- a cat walking in midnight
- resolution
- 1024 x 1024 (square)
{ "prompt": "a cat walking in midnight ", "resolution": "1024 x 1024 (square)" }
Install Replicate’s Node.js client library: npm install replicate
Import and set up the client: import Replicate from "replicate"; import fs from "node:fs"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", { input: { prompt: "a cat walking in midnight ", resolution: "1024 x 1024 (square)" } } ); // To access the file URL: console.log(output.url()); //=> "http://example.com" // To write the file to disk: fs.writeFileSync("my-image.png", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Import the client: import replicate
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", input={ "prompt": "a cat walking in midnight ", "resolution": "1024 x 1024 (square)" } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", "input": { "prompt": "a cat walking in midnight ", "resolution": "1024 x 1024 (square)" } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-03-20T21:10:42.226980Z", "created_at": "2024-03-20T21:07:58.299397Z", "data_removed": false, "error": null, "id": "wh2kouzbsopgy2uigee7ilvwfi", "input": { "prompt": "a cat walking in midnight ", "resolution": "1024 x 1024 (square)" }, "logs": "Starting...\nNo seed - create brand new 7947036\nPrompting {'33': {'inputs': {'samples': ['38', 0], 'vae': ['37', 2]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '34': {'inputs': {'images': ['33', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '37': {'inputs': {'ckpt_name': 'Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors'}, 'class_type': 'CheckpointLoaderSimple', '_meta': {'title': 'Load Checkpoint'}}, '38': {'inputs': {'seed': 7947036, 'steps': 35, 'cfg': 7, 'sampler_name': 'dpmpp_2m_sde_gpu', 'scheduler': 'karras', 'denoise': 1, 'model': ['37', 0], 'positive': ['39', 0], 'negative': ['40', 0], 'latent_image': ['41', 0]}, 'class_type': 'KSampler', '_meta': {'title': 'KSampler'}}, '39': {'inputs': {'text': 'a cat walking in midnight ', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '40': {'inputs': {'text': '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': 
{'title': 'CLIP Text Encode (Prompt)'}}, '41': {'inputs': {'dimensions': '1024 x 1024 (square)', 'clip_scale': 2, 'batch_size': 1}, 'class_type': 'SDXL Empty Latent Image (rgthree)', '_meta': {'title': 'SDXL Empty Latent Image (rgthree)'}}, '42': {'inputs': {'stop_at_clip_layer': -1, 'clip': ['37', 1]}, 'class_type': 'CLIPSetLastLayer', '_meta': {'title': 'CLIP Set Last Layer'}}}\ngot prompt\n\u001b[32m[rgthree] Using rgthree's optimized recursive execution.\u001b[0m\n\u001b[32m[rgthree]\u001b[0m First run patching recursive_output_delete_if_changed and recursive_will_execute.\u001b[0m\n\u001b[33m[rgthree] Note: \u001b[0mIf execution seems broken due to forward ComfyUI changes, you can disable the optimization from rgthree settings in ComfyUI.\u001b[0m\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight', 'clip_g.logit_scale']\nRequested to load SDXLClipModel\nLoading 1 new model\nRequested to load SDXL\nLoading 1 new model\n 0%| | 0/35 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=14.614643096923828 and t1=14.614643.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n 3%|▎ | 1/35 [00:00<00:18, 1.80it/s]\n 6%|▌ | 2/35 [00:00<00:10, 3.25it/s]\n 9%|▊ | 3/35 [00:00<00:07, 4.49it/s]\n 11%|█▏ | 4/35 [00:00<00:05, 5.55it/s]\n 14%|█▍ | 5/35 [00:01<00:04, 6.23it/s]\n 17%|█▋ | 6/35 [00:01<00:04, 6.87it/s]\n 20%|██ | 7/35 [00:01<00:03, 7.40it/s]\n 23%|██▎ | 8/35 [00:01<00:03, 7.78it/s]\n 26%|██▌ | 9/35 [00:01<00:03, 8.07it/s]\n 29%|██▊ | 10/35 [00:01<00:03, 8.25it/s]\n 31%|███▏ | 11/35 [00:01<00:02, 8.38it/s]\n 34%|███▍ | 12/35 [00:01<00:02, 8.49it/s]\n 37%|███▋ | 13/35 [00:01<00:02, 8.54it/s]\n 40%|████ | 14/35 [00:02<00:02, 8.51it/s]\n 43%|████▎ | 15/35 [00:02<00:02, 8.56it/s]\n 46%|████▌ | 16/35 [00:02<00:02, 
8.62it/s]\n 49%|████▊ | 17/35 [00:02<00:02, 8.54it/s]\n 51%|█████▏ | 18/35 [00:02<00:01, 8.60it/s]\n 54%|█████▍ | 19/35 [00:02<00:01, 8.66it/s]\n 57%|█████▋ | 20/35 [00:02<00:01, 8.69it/s]\n 60%|██████ | 21/35 [00:02<00:01, 8.74it/s]\n 63%|██████▎ | 22/35 [00:03<00:01, 8.74it/s]\n 66%|██████▌ | 23/35 [00:03<00:01, 8.55it/s]\n 69%|██████▊ | 24/35 [00:03<00:01, 8.63it/s]\n 71%|███████▏ | 25/35 [00:03<00:01, 8.61it/s]\n 74%|███████▍ | 26/35 [00:03<00:01, 8.64it/s]\n 77%|███████▋ | 27/35 [00:03<00:00, 8.68it/s]\n 80%|████████ | 28/35 [00:03<00:00, 8.73it/s]\n 83%|████████▎ | 29/35 [00:03<00:00, 8.74it/s]\n 86%|████████▌ | 30/35 [00:03<00:00, 8.75it/s]\n 89%|████████▊ | 31/35 [00:04<00:00, 8.80it/s]\n 91%|█████████▏| 32/35 [00:04<00:00, 8.75it/s]\n 94%|█████████▍| 33/35 [00:04<00:00, 8.80it/s]\n 97%|█████████▋| 34/35 [00:04<00:00, 8.92it/s]\n100%|██████████| 35/35 [00:04<00:00, 9.07it/s]\n100%|██████████| 35/35 [00:04<00:00, 7.80it/s]\nRequested to load AutoencoderKL\nLoading 1 new model\nPrompt executed in 10.89 seconds\nnode output: {'images': [{'filename': 'ComfyUI_temp_rprkl_00001_.png', 'subfolder': '', 'type': 'temp'}]}\ntemp", "metrics": { "predict_time": 11.341973, "total_time": 163.927583 }, "output": "https://replicate.delivery/pbxt/pLb94hkVLW71HJGFFheuQuE2EfbR2v1acIrWB97BdKcRLQiSA/out-34.png", "started_at": "2024-03-20T21:10:30.885007Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/wh2kouzbsopgy2uigee7ilvwfi", "cancel": "https://api.replicate.com/v1/predictions/wh2kouzbsopgy2uigee7ilvwfi/cancel" }, "version": "3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0" }
Generated inStarting... No seed - create brand new 7947036 Prompting {'33': {'inputs': {'samples': ['38', 0], 'vae': ['37', 2]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '34': {'inputs': {'images': ['33', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '37': {'inputs': {'ckpt_name': 'Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors'}, 'class_type': 'CheckpointLoaderSimple', '_meta': {'title': 'Load Checkpoint'}}, '38': {'inputs': {'seed': 7947036, 'steps': 35, 'cfg': 7, 'sampler_name': 'dpmpp_2m_sde_gpu', 'scheduler': 'karras', 'denoise': 1, 'model': ['37', 0], 'positive': ['39', 0], 'negative': ['40', 0], 'latent_image': ['41', 0]}, 'class_type': 'KSampler', '_meta': {'title': 'KSampler'}}, '39': {'inputs': {'text': 'a cat walking in midnight ', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '40': {'inputs': {'text': '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '41': {'inputs': {'dimensions': '1024 x 1024 (square)', 'clip_scale': 2, 'batch_size': 1}, 'class_type': 'SDXL Empty Latent Image (rgthree)', '_meta': {'title': 'SDXL Empty Latent Image (rgthree)'}}, '42': {'inputs': 
{'stop_at_clip_layer': -1, 'clip': ['37', 1]}, 'class_type': 'CLIPSetLastLayer', '_meta': {'title': 'CLIP Set Last Layer'}}} got prompt [rgthree] Using rgthree's optimized recursive execution. [rgthree] First run patching recursive_output_delete_if_changed and recursive_will_execute. [rgthree] Note: If execution seems broken due to forward ComfyUI changes, you can disable the optimization from rgthree settings in ComfyUI. model_type EPS Using pytorch attention in VAE Using pytorch attention in VAE clip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight', 'clip_g.logit_scale'] Requested to load SDXLClipModel Loading 1 new model Requested to load SDXL Loading 1 new model 0%| | 0/35 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=14.614643096923828 and t1=14.614643. warnings.warn(f"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.") 3%|▎ | 1/35 [00:00<00:18, 1.80it/s] 6%|▌ | 2/35 [00:00<00:10, 3.25it/s] 9%|▊ | 3/35 [00:00<00:07, 4.49it/s] 11%|█▏ | 4/35 [00:00<00:05, 5.55it/s] 14%|█▍ | 5/35 [00:01<00:04, 6.23it/s] 17%|█▋ | 6/35 [00:01<00:04, 6.87it/s] 20%|██ | 7/35 [00:01<00:03, 7.40it/s] 23%|██▎ | 8/35 [00:01<00:03, 7.78it/s] 26%|██▌ | 9/35 [00:01<00:03, 8.07it/s] 29%|██▊ | 10/35 [00:01<00:03, 8.25it/s] 31%|███▏ | 11/35 [00:01<00:02, 8.38it/s] 34%|███▍ | 12/35 [00:01<00:02, 8.49it/s] 37%|███▋ | 13/35 [00:01<00:02, 8.54it/s] 40%|████ | 14/35 [00:02<00:02, 8.51it/s] 43%|████▎ | 15/35 [00:02<00:02, 8.56it/s] 46%|████▌ | 16/35 [00:02<00:02, 8.62it/s] 49%|████▊ | 17/35 [00:02<00:02, 8.54it/s] 51%|█████▏ | 18/35 [00:02<00:01, 8.60it/s] 54%|█████▍ | 19/35 [00:02<00:01, 8.66it/s] 57%|█████▋ | 20/35 [00:02<00:01, 8.69it/s] 60%|██████ | 21/35 [00:02<00:01, 8.74it/s] 63%|██████▎ | 22/35 [00:03<00:01, 8.74it/s] 66%|██████▌ | 23/35 [00:03<00:01, 8.55it/s] 69%|██████▊ | 24/35 [00:03<00:01, 8.63it/s] 71%|███████▏ | 25/35 
[00:03<00:01, 8.61it/s] 74%|███████▍ | 26/35 [00:03<00:01, 8.64it/s] 77%|███████▋ | 27/35 [00:03<00:00, 8.68it/s] 80%|████████ | 28/35 [00:03<00:00, 8.73it/s] 83%|████████▎ | 29/35 [00:03<00:00, 8.74it/s] 86%|████████▌ | 30/35 [00:03<00:00, 8.75it/s] 89%|████████▊ | 31/35 [00:04<00:00, 8.80it/s] 91%|█████████▏| 32/35 [00:04<00:00, 8.75it/s] 94%|█████████▍| 33/35 [00:04<00:00, 8.80it/s] 97%|█████████▋| 34/35 [00:04<00:00, 8.92it/s] 100%|██████████| 35/35 [00:04<00:00, 9.07it/s] 100%|██████████| 35/35 [00:04<00:00, 7.80it/s] Requested to load AutoencoderKL Loading 1 new model Prompt executed in 10.89 seconds node output: {'images': [{'filename': 'ComfyUI_temp_rprkl_00001_.png', 'subfolder': '', 'type': 'temp'}]} temp
Prediction
growthmkt/prompt:6b73f6a104543620e702ae49aa2f8cb3d1157c0e50dec4ec54bd67524b8d2f03 — ID: aara5zrbc6r2axj3spt6lwp4pu · Status: Succeeded · Source: API · Hardware: A100 (40GB) · Total duration · Created · Input
- seed
- 364190
- prompt
- (abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65
{ "seed": 364190, "prompt": "(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65" }
Install Replicate’s Node.js client library: npm install replicate
Import and set up the client: import Replicate from "replicate"; import fs from "node:fs"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "growthmkt/prompt:6b73f6a104543620e702ae49aa2f8cb3d1157c0e50dec4ec54bd67524b8d2f03", { input: { seed: 364190, prompt: "(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65" } } ); // To access the file URL: console.log(output.url()); //=> "http://example.com" // To write the file to disk: fs.writeFileSync("my-image.png", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Import the client: import replicate
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "growthmkt/prompt:6b73f6a104543620e702ae49aa2f8cb3d1157c0e50dec4ec54bd67524b8d2f03", input={ "seed": 364190, "prompt": "(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65" } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "growthmkt/prompt:6b73f6a104543620e702ae49aa2f8cb3d1157c0e50dec4ec54bd67524b8d2f03", "input": { "seed": 364190, "prompt": "(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65" } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-03-18T14:23:19.238562Z", "created_at": "2024-03-18T14:19:34.358434Z", "data_removed": false, "error": null, "id": "aara5zrbc6r2axj3spt6lwp4pu", "input": { "seed": 364190, "prompt": "(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65" }, "logs": "Starting...\nPrompting {'37': {'inputs': {'vae_name': 'madebyollin_sdxl_vae.safetensors'}, 'class_type': 'VAELoader', '_meta': {'title': 'Load VAE'}}, '48': {'inputs': {'base_ckpt_name': 'RealVisXL_V4.0.safetensors', 'base_clip_skip': -1, 'refiner_ckpt_name': 'ProteusV0.5.2.safetensors', 'refiner_clip_skip': -1, 'positive_ascore': 6, 'negative_ascore': 2, 'vae_name': 'Baked VAE', 'positive': ['58', 0], 'negative': ['58', 1], 'token_normalization': 'none', 'weight_interpretation': 'A1111', 'empty_latent_width': ['49', 1], 'empty_latent_height': ['49', 2], 'batch_size': 1}, 'class_type': 'Eff. Loader SDXL', '_meta': {'title': 'Eff. 
Loader SDXL'}}, '49': {'inputs': {'resolution': '1024x1024 (1.0)', 'batch_size': 1}, 'class_type': 'SDXLEmptyLatentSizePicker+', '_meta': {'title': '🔧 SDXL Empty Latent Size Picker'}}, '52': {'inputs': {'sdxl_tuple': ['48', 0]}, 'class_type': 'Unpack SDXL Tuple', '_meta': {'title': 'Unpack SDXL Tuple'}}, '58': {'inputs': {'text_positive': '(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65', 'text_negative': '(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, embedding:unaestheticXL_Alb2, (worst quality, low quality, normal quality), disabled body, (ugly), sketches, (manicure:1.2), lowres, watermark', 'style': 'ads-fashion editorial', 'log_prompt': 'No', 'style_name': True}, 'class_type': 'SDXLPromptStyler', '_meta': {'title': 'SDXL Prompt Styler'}}, '60': {'inputs': {'samples': ['221', 0], 'vae': ['37', 0]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '61': {'inputs': {'samples': ['219', 0], 'vae': ['37', 0]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '72': {'inputs': {'b1': 1.3, 'b2': 1.4, 's1': 0.9, 's2': 0.2, 'model': ['169', 0]}, 'class_type': 'FreeU_V2', '_meta': {'title': 'FreeU_V2'}}, 
'76': {'inputs': {'ratio': 0.5, 'model1': ['52', 0], 'model2': ['52', 4]}, 'class_type': 'ModelMergeSimple', '_meta': {'title': 'ModelMergeSimple'}}, '91': {'inputs': {'width': ['49', 1], 'height': ['49', 2], 'batch_size': 1}, 'class_type': 'EmptyLatentImage', '_meta': {'title': 'Kohya HiRes'}}, '98': {'inputs': {'seed': 364190}, 'class_type': 'Seed (rgthree)', '_meta': {'title': 'Seed (rgthree)'}}, '105': {'inputs': {'brightness': -0.05, 'contrast': 1, 'saturation': 1, 'sharpness': 1, 'blur': 0, 'gaussian_blur': 0, 'edge_enhance': 0, 'detail_enhance': 'true', 'image': ['61', 0]}, 'class_type': 'Image Filter Adjustments', '_meta': {'title': 'Image Filter Adjustments'}}, '108': {'inputs': {'model': ['72', 0], 'clip': ['223', 1], 'vae': ['37', 0], 'positive': ['52', 6], 'negative': ['52', 7]}, 'class_type': 'ToBasicPipe', '_meta': {'title': 'ToBasicPipe'}}, '160': {'inputs': {'model_name': '1x-ITF-SkinDiffDetail-Lite-v1.pth'}, 'class_type': 'UpscaleModelLoader', '_meta': {'title': 'Load Upscale Model'}}, '161': {'inputs': {'upscale_model': ['160', 0], 'image': ['105', 0]}, 'class_type': 'ImageUpscaleWithModel', '_meta': {'title': 'Upscale Image (using Model)'}}, '167': {'inputs': {'block_number': 6, 'downscale_factor': 2, 'start_percent': 0, 'end_percent': 0.35, 'downscale_after_skip': True, 'downscale_method': 'bicubic', 'upscale_method': 'bicubic', 'model': ['223', 0]}, 'class_type': 'PatchModelAddDownscale', '_meta': {'title': 'PatchModelAddDownscale (Kohya Deep Shrink)'}}, '169': {'inputs': {'scale': 0.5, 'blur_sigma': 2, 'model': ['167', 0]}, 'class_type': 'SelfAttentionGuidance', '_meta': {'title': 'Self-Attention Guidance'}}, '180': {'inputs': {'basic_pipe': ['108', 0]}, 'class_type': 'FromBasicPipe', '_meta': {'title': 'FromBasicPipe'}}, '219': {'inputs': {'add_noise': 'disable', 'noise_seed': ['98', 0], 'steps': 20, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'sgm_uniform', 'start_at_step': 12, 'end_at_step': 10000, 'return_with_leftover_noise': 
'disable', 'sharpness': 2, 'model': ['72', 0], 'positive': ['52', 6], 'negative': ['52', 7], 'latent_image': ['221', 0]}, 'class_type': 'Fooocus_KSamplerAdvanced', '_meta': {'title': 'KSampler Adv. Fooocus'}}, '221': {'inputs': {'add_noise': 'enable', 'noise_seed': ['98', 0], 'steps': 20, 'cfg': 12, 'sampler_name': 'euler', 'scheduler': 'simple', 'start_at_step': 0, 'end_at_step': 12, 'return_with_leftover_noise': 'enable', 'sharpness': 2, 'model': ['52', 4], 'positive': ['52', 2], 'negative': ['52', 3], 'latent_image': ['91', 0]}, 'class_type': 'Fooocus_KSamplerAdvanced', '_meta': {'title': 'KSampler Adv. Fooocus'}}, '222': {'inputs': {'input_mode': 'simple', 'lora_count': 4, 'lora_name_1': 'sdxl_lightning_8step_lora.safetensors', 'lora_wt_1': 1, 'model_str_1': 1, 'clip_str_1': 1, 'lora_name_2': 'faces_v3.safetensors', 'lora_wt_2': 1, 'model_str_2': 1, 'clip_str_2': 1, 'lora_name_3': 'resolution_lora.safetensors', 'lora_wt_3': 1, 'model_str_3': 1, 'clip_str_3': 1, 'lora_name_4': 'XL_fix_hands.safetensors', 'lora_wt_4': 1.5, 'model_str_4': 1, 'clip_str_4': 1, 'lora_name_5': 'None', 'lora_wt_5': 1, 'model_str_5': 1, 'clip_str_5': 1, 'lora_name_6': 'faces_v3.safetensors', 'lora_wt_6': 1, 'model_str_6': 1, 'clip_str_6': 1, 'lora_name_7': 'None', 'lora_wt_7': 1, 'model_str_7': 1, 'clip_str_7': 1, 'lora_name_8': 'None', 'lora_wt_8': 1, 'model_str_8': 1, 'clip_str_8': 1, 'lora_name_9': 'None', 'lora_wt_9': 1, 'model_str_9': 1, 'clip_str_9': 1, 'lora_name_10': 'None', 'lora_wt_10': 1, 'model_str_10': 1, 'clip_str_10': 1, 'lora_name_11': 'None', 'lora_wt_11': 1, 'model_str_11': 1, 'clip_str_11': 1, 'lora_name_12': 'None', 'lora_wt_12': 1, 'model_str_12': 1, 'clip_str_12': 1, 'lora_name_13': 'None', 'lora_wt_13': 1, 'model_str_13': 1, 'clip_str_13': 1, 'lora_name_14': 'None', 'lora_wt_14': 1, 'model_str_14': 1, 'clip_str_14': 1, 'lora_name_15': 'None', 'lora_wt_15': 1, 'model_str_15': 1, 'clip_str_15': 1, 'lora_name_16': 'None', 'lora_wt_16': 1, 'model_str_16': 1, 
'clip_str_16': 1, 'lora_name_17': 'None', 'lora_wt_17': 1, 'model_str_17': 1, 'clip_str_17': 1, 'lora_name_18': 'None', 'lora_wt_18': 1, 'model_str_18': 1, 'clip_str_18': 1, 'lora_name_19': 'None', 'lora_wt_19': 1, 'model_str_19': 1, 'clip_str_19': 1, 'lora_name_20': 'None', 'lora_wt_20': 1, 'model_str_20': 1, 'clip_str_20': 1, 'lora_name_21': 'None', 'lora_wt_21': 1, 'model_str_21': 1, 'clip_str_21': 1, 'lora_name_22': 'None', 'lora_wt_22': 1, 'model_str_22': 1, 'clip_str_22': 1, 'lora_name_23': 'None', 'lora_wt_23': 1, 'model_str_23': 1, 'clip_str_23': 1, 'lora_name_24': 'None', 'lora_wt_24': 1, 'model_str_24': 1, 'clip_str_24': 1, 'lora_name_25': 'None', 'lora_wt_25': 1, 'model_str_25': 1, 'clip_str_25': 1, 'lora_name_26': 'None', 'lora_wt_26': 1, 'model_str_26': 1, 'clip_str_26': 1, 'lora_name_27': 'None', 'lora_wt_27': 1, 'model_str_27': 1, 'clip_str_27': 1, 'lora_name_28': 'None', 'lora_wt_28': 1, 'model_str_28': 1, 'clip_str_28': 1, 'lora_name_29': 'None', 'lora_wt_29': 1, 'model_str_29': 1, 'clip_str_29': 1, 'lora_name_30': 'None', 'lora_wt_30': 1, 'model_str_30': 1, 'clip_str_30': 1, 'lora_name_31': 'None', 'lora_wt_31': 1, 'model_str_31': 1, 'clip_str_31': 1, 'lora_name_32': 'None', 'lora_wt_32': 1, 'model_str_32': 1, 'clip_str_32': 1, 'lora_name_33': 'None', 'lora_wt_33': 1, 'model_str_33': 1, 'clip_str_33': 1, 'lora_name_34': 'None', 'lora_wt_34': 1, 'model_str_34': 1, 'clip_str_34': 1, 'lora_name_35': 'None', 'lora_wt_35': 1, 'model_str_35': 1, 'clip_str_35': 1, 'lora_name_36': 'None', 'lora_wt_36': 1, 'model_str_36': 1, 'clip_str_36': 1, 'lora_name_37': 'None', 'lora_wt_37': 1, 'model_str_37': 1, 'clip_str_37': 1, 'lora_name_38': 'None', 'lora_wt_38': 1, 'model_str_38': 1, 'clip_str_38': 1, 'lora_name_39': 'None', 'lora_wt_39': 1, 'model_str_39': 1, 'clip_str_39': 1, 'lora_name_40': 'None', 'lora_wt_40': 1, 'model_str_40': 1, 'clip_str_40': 1, 'lora_name_41': 'None', 'lora_wt_41': 1, 'model_str_41': 1, 'clip_str_41': 1, 'lora_name_42': 'None', 
'lora_wt_42': 1, 'model_str_42': 1, 'clip_str_42': 1, 'lora_name_43': 'None', 'lora_wt_43': 1, 'model_str_43': 1, 'clip_str_43': 1, 'lora_name_44': 'None', 'lora_wt_44': 1, 'model_str_44': 1, 'clip_str_44': 1, 'lora_name_45': 'None', 'lora_wt_45': 1, 'model_str_45': 1, 'clip_str_45': 1, 'lora_name_46': 'None', 'lora_wt_46': 1, 'model_str_46': 1, 'clip_str_46': 1, 'lora_name_47': 'None', 'lora_wt_47': 1, 'model_str_47': 1, 'clip_str_47': 1, 'lora_name_48': 'None', 'lora_wt_48': 1, 'model_str_48': 1, 'clip_str_48': 1, 'lora_name_49': 'None', 'lora_wt_49': 1, 'model_str_49': 1, 'clip_str_49': 1}, 'class_type': 'LoRA Stacker', '_meta': {'title': 'LoRA Stacker'}}, '223': {'inputs': {'model': ['250', 0], 'clip': ['52', 5], 'lora_stack': ['222', 0]}, 'class_type': 'CR Apply LoRA Stack', '_meta': {'title': '💊 CR Apply LoRA Stack'}}, '250': {'inputs': {'downsample_factor_depth_1': 2, 'downsample_factor_depth_2': 1, 'model': ['76', 0]}, 'class_type': 'ToDoPatchModel', '_meta': {'title': 'ToDo: Token Dowsampling'}}, '268': {'inputs': {'model_name': 'bbox/hand_yolov8s.pt'}, 'class_type': 'UltralyticsDetectorProvider', '_meta': {'title': 'UltralyticsDetectorProvider'}}, '269': {'inputs': {'bbox_threshold': 0.4, 'bbox_dilation': 0, 'crop_factor': 3, 'drop_size': 10, 'sub_threshold': 0.5, 'sub_dilation': 0, 'sub_bbox_expansion': 0, 'sam_mask_hint_threshold': 0.7, 'post_dilation': 0, 'bbox_detector': ['268', 0], 'image': ['161', 0]}, 'class_type': 'ImpactSimpleDetectorSEGS', '_meta': {'title': 'Simple Detector (SEGS)'}}, '271': {'inputs': {'segs': ['269', 0]}, 'class_type': 'SegsToCombinedMask', '_meta': {'title': 'SEGS to MASK (combined)'}}, '272': {'inputs': {'masks': ['271', 0]}, 'class_type': 'Convert Masks to Images', '_meta': {'title': 'Convert Masks to Images'}}, '276': {'inputs': {'text': 'ultra detailed, intricate details, cinematic, masterpiece, 8k, 4k, UHD', 'clip': ['180', 1]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '277': 
{'inputs': {'text': 'ugly, deformed, embedding:unaestheticXL_Alb2, ', 'clip': ['180', 1]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '280': {'inputs': {'guide_size': 384, 'guide_size_for': True, 'max_size': 1024, 'seed': 393669993784450, 'steps': 12, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'normal', 'denoise': 0.5, 'feather': 5, 'noise_mask': True, 'force_inpaint': True, 'wildcard': '<lora:XL_fix_hands:1.5>, <lora:DetailedEyes_V3:1.0>', 'cycle': 1, 'inpaint_model': False, 'noise_mask_feather': 20, 'image': ['161', 0], 'segs': ['269', 0], 'model': ['180', 0], 'clip': ['180', 1], 'vae': ['180', 2], 'positive': ['276', 0], 'negative': ['277', 0]}, 'class_type': 'DetailerForEachDebug', '_meta': {'title': 'DetailerDebug (SEGS)'}}, '288': {'inputs': {'guide_size': 384, 'guide_size_for': True, 'max_size': 1024, 'seed': 500442845647801, 'steps': 12, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'normal', 'denoise': 0.5, 'feather': 5, 'noise_mask': True, 'force_inpaint': True, 'wildcard': '<lora:XL_fix_hands:1.5>, <lora:DetailedEyes_V3:1.0>', 'cycle': 1, 'inpaint_model': False, 'noise_mask_feather': 20, 'image': ['280', 0], 'segs': ['269', 0], 'model': ['180', 0], 'clip': ['180', 1], 'vae': ['180', 2], 'positive': ['276', 0], 'negative': ['277', 0]}, 'class_type': 'DetailerForEachDebug', '_meta': {'title': 'DetailerDebug (SEGS)'}}, '291': {'inputs': {'facedetection': 'retinaface_resnet50', 'model': 'GFPGANv1.4.pth', 'visibility': 1, 'codeformer_weight': 0.5, 'image': ['288', 0]}, 'class_type': 'ReActorRestoreFace', '_meta': {'title': 'Restore Face'}}, '295': {'inputs': {'text': ['58', 0]}, 'class_type': 'ShowText|pysssss', '_meta': {'title': 'Show Text 🐍'}}, '359': {'inputs': {'model_name': '4x_UniversalUpscalerV2-Sharp_101000_G.pth'}, 'class_type': 'UpscaleModelLoader', '_meta': {'title': 'Load Upscale Model'}}, '360': {'inputs': {'upscale_model': ['359', 0], 'image': ['367', 0]}, 'class_type': 'ImageUpscaleWithModel', 
'_meta': {'title': 'Upscale Image (using Model)'}}, '362': {'inputs': {'images': ['360', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '367': {'inputs': {'size': 1024, 'mode': True, 'images': ['291', 0]}, 'class_type': 'ImageScaleDownToSize', '_meta': {'title': 'Scale Down To Size'}}}\ngot prompt\n\u001b[32m[rgthree] Using rgthree's optimized recursive execution.\u001b[0m\nError: extra_pnginfo[0] is not a dict or missing 'workflow' key\nRequested to load SDXLClipModel\nLoading 1 new model\nRequested to load SDXLClipModel\nLoading 1 new model\n----------------------------------------\n\u001b[36mEff. Loader SDXL Models Cache:\u001b[0m\nCkpt:\n[1] RealVisXL_V4.0\nRefn:\n[1] ProteusV0.5.2\nRequested to load SDXL\nLoading 1 new model\n 0%| | 0/12 [00:00<?, ?it/s]\n 8%|▊ | 1/12 [00:00<00:02, 4.91it/s]\n 17%|█▋ | 2/12 [00:00<00:02, 4.69it/s]\n 25%|██▌ | 3/12 [00:00<00:01, 4.85it/s]\n 33%|███▎ | 4/12 [00:00<00:01, 4.94it/s]\n 42%|████▏ | 5/12 [00:01<00:01, 4.98it/s]\n 50%|█████ | 6/12 [00:01<00:01, 4.96it/s]\n 58%|█████▊ | 7/12 [00:01<00:01, 4.83it/s]\n 67%|██████▋ | 8/12 [00:01<00:00, 4.89it/s]\n 75%|███████▌ | 9/12 [00:01<00:00, 4.97it/s]\n 83%|████████▎ | 10/12 [00:02<00:00, 5.00it/s]\n 92%|█████████▏| 11/12 [00:02<00:00, 5.03it/s]\n100%|██████████| 12/12 [00:02<00:00, 4.93it/s]\n100%|██████████| 12/12 [00:02<00:00, 4.93it/s]\nRequested to load SDXL\nLoading 1 new model\n 0%| | 0/8 [00:00<?, ?it/s]\n 12%|█▎ | 1/8 [00:00<00:01, 3.76it/s]\n 25%|██▌ | 2/8 [00:00<00:01, 3.87it/s]\n 38%|███▊ | 3/8 [00:00<00:01, 4.03it/s]\n 50%|█████ | 4/8 [00:00<00:00, 4.10it/s]\n 62%|██████▎ | 5/8 [00:01<00:00, 4.17it/s]\n 75%|███████▌ | 6/8 [00:01<00:00, 4.38it/s]\n 88%|████████▊ | 7/8 [00:01<00:00, 4.54it/s]\n100%|██████████| 8/8 [00:01<00:00, 4.67it/s]\n100%|██████████| 8/8 [00:01<00:00, 4.36it/s]\n0: 640x640 (no detections), 12.0ms\nSpeed: 3.7ms preprocess, 12.0ms inference, 1.1ms postprocess per image at shape (1, 3, 640, 640)\nRequested to load 
SDXLClipModel\nLoading 1 new model\n[ReActor] 14:23:10 - \u001b[38;5;173mSTATUS\u001b[0m - Restoring with GFPGANv1.4.pth\nPrompt executed in 25.06 seconds\nnode output: {'text': ['fashion editorial style (abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65 . high fashion, trendy, stylish, editorial, magazine style, professional, highly detailed']}\nnode output: {'images': [{'filename': 'ComfyUI_temp_pxcuf_00002_.png', 'subfolder': '', 'type': 'temp'}]}\ntemp\nnode output: {'text': ['fashion editorial style (abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65 . 
high fashion, trendy, stylish, editorial, magazine style, professional, highly detailed']}\nnode output: {'images': [{'filename': 'ComfyUI_temp_pxcuf_00002_.png', 'subfolder': '', 'type': 'temp'}]}\ntemp", "metrics": { "predict_time": 25.972954, "total_time": 224.880128 }, "output": "https://replicate.delivery/pbxt/mTGO49g726ZgAxQkDsYqvQGW44FttEGCPezCVvpyIxMrAwQJA/out-362.png", "started_at": "2024-03-18T14:22:53.265608Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/aara5zrbc6r2axj3spt6lwp4pu", "cancel": "https://api.replicate.com/v1/predictions/aara5zrbc6r2axj3spt6lwp4pu/cancel" }, "version": "6b73f6a104543620e702ae49aa2f8cb3d1157c0e50dec4ec54bd67524b8d2f03" }
Generated inStarting... Prompting {'37': {'inputs': {'vae_name': 'madebyollin_sdxl_vae.safetensors'}, 'class_type': 'VAELoader', '_meta': {'title': 'Load VAE'}}, '48': {'inputs': {'base_ckpt_name': 'RealVisXL_V4.0.safetensors', 'base_clip_skip': -1, 'refiner_ckpt_name': 'ProteusV0.5.2.safetensors', 'refiner_clip_skip': -1, 'positive_ascore': 6, 'negative_ascore': 2, 'vae_name': 'Baked VAE', 'positive': ['58', 0], 'negative': ['58', 1], 'token_normalization': 'none', 'weight_interpretation': 'A1111', 'empty_latent_width': ['49', 1], 'empty_latent_height': ['49', 2], 'batch_size': 1}, 'class_type': 'Eff. Loader SDXL', '_meta': {'title': 'Eff. Loader SDXL'}}, '49': {'inputs': {'resolution': '1024x1024 (1.0)', 'batch_size': 1}, 'class_type': 'SDXLEmptyLatentSizePicker+', '_meta': {'title': '🔧 SDXL Empty Latent Size Picker'}}, '52': {'inputs': {'sdxl_tuple': ['48', 0]}, 'class_type': 'Unpack SDXL Tuple', '_meta': {'title': 'Unpack SDXL Tuple'}}, '58': {'inputs': {'text_positive': '(abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65', 'text_negative': '(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, embedding:unaestheticXL_Alb2, (worst 
quality, low quality, normal quality), disabled body, (ugly), sketches, (manicure:1.2), lowres, watermark', 'style': 'ads-fashion editorial', 'log_prompt': 'No', 'style_name': True}, 'class_type': 'SDXLPromptStyler', '_meta': {'title': 'SDXL Prompt Styler'}}, '60': {'inputs': {'samples': ['221', 0], 'vae': ['37', 0]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '61': {'inputs': {'samples': ['219', 0], 'vae': ['37', 0]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '72': {'inputs': {'b1': 1.3, 'b2': 1.4, 's1': 0.9, 's2': 0.2, 'model': ['169', 0]}, 'class_type': 'FreeU_V2', '_meta': {'title': 'FreeU_V2'}}, '76': {'inputs': {'ratio': 0.5, 'model1': ['52', 0], 'model2': ['52', 4]}, 'class_type': 'ModelMergeSimple', '_meta': {'title': 'ModelMergeSimple'}}, '91': {'inputs': {'width': ['49', 1], 'height': ['49', 2], 'batch_size': 1}, 'class_type': 'EmptyLatentImage', '_meta': {'title': 'Kohya HiRes'}}, '98': {'inputs': {'seed': 364190}, 'class_type': 'Seed (rgthree)', '_meta': {'title': 'Seed (rgthree)'}}, '105': {'inputs': {'brightness': -0.05, 'contrast': 1, 'saturation': 1, 'sharpness': 1, 'blur': 0, 'gaussian_blur': 0, 'edge_enhance': 0, 'detail_enhance': 'true', 'image': ['61', 0]}, 'class_type': 'Image Filter Adjustments', '_meta': {'title': 'Image Filter Adjustments'}}, '108': {'inputs': {'model': ['72', 0], 'clip': ['223', 1], 'vae': ['37', 0], 'positive': ['52', 6], 'negative': ['52', 7]}, 'class_type': 'ToBasicPipe', '_meta': {'title': 'ToBasicPipe'}}, '160': {'inputs': {'model_name': '1x-ITF-SkinDiffDetail-Lite-v1.pth'}, 'class_type': 'UpscaleModelLoader', '_meta': {'title': 'Load Upscale Model'}}, '161': {'inputs': {'upscale_model': ['160', 0], 'image': ['105', 0]}, 'class_type': 'ImageUpscaleWithModel', '_meta': {'title': 'Upscale Image (using Model)'}}, '167': {'inputs': {'block_number': 6, 'downscale_factor': 2, 'start_percent': 0, 'end_percent': 0.35, 'downscale_after_skip': True, 'downscale_method': 'bicubic', 
'upscale_method': 'bicubic', 'model': ['223', 0]}, 'class_type': 'PatchModelAddDownscale', '_meta': {'title': 'PatchModelAddDownscale (Kohya Deep Shrink)'}}, '169': {'inputs': {'scale': 0.5, 'blur_sigma': 2, 'model': ['167', 0]}, 'class_type': 'SelfAttentionGuidance', '_meta': {'title': 'Self-Attention Guidance'}}, '180': {'inputs': {'basic_pipe': ['108', 0]}, 'class_type': 'FromBasicPipe', '_meta': {'title': 'FromBasicPipe'}}, '219': {'inputs': {'add_noise': 'disable', 'noise_seed': ['98', 0], 'steps': 20, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'sgm_uniform', 'start_at_step': 12, 'end_at_step': 10000, 'return_with_leftover_noise': 'disable', 'sharpness': 2, 'model': ['72', 0], 'positive': ['52', 6], 'negative': ['52', 7], 'latent_image': ['221', 0]}, 'class_type': 'Fooocus_KSamplerAdvanced', '_meta': {'title': 'KSampler Adv. Fooocus'}}, '221': {'inputs': {'add_noise': 'enable', 'noise_seed': ['98', 0], 'steps': 20, 'cfg': 12, 'sampler_name': 'euler', 'scheduler': 'simple', 'start_at_step': 0, 'end_at_step': 12, 'return_with_leftover_noise': 'enable', 'sharpness': 2, 'model': ['52', 4], 'positive': ['52', 2], 'negative': ['52', 3], 'latent_image': ['91', 0]}, 'class_type': 'Fooocus_KSamplerAdvanced', '_meta': {'title': 'KSampler Adv. 
Fooocus'}}, '222': {'inputs': {'input_mode': 'simple', 'lora_count': 4, 'lora_name_1': 'sdxl_lightning_8step_lora.safetensors', 'lora_wt_1': 1, 'model_str_1': 1, 'clip_str_1': 1, 'lora_name_2': 'faces_v3.safetensors', 'lora_wt_2': 1, 'model_str_2': 1, 'clip_str_2': 1, 'lora_name_3': 'resolution_lora.safetensors', 'lora_wt_3': 1, 'model_str_3': 1, 'clip_str_3': 1, 'lora_name_4': 'XL_fix_hands.safetensors', 'lora_wt_4': 1.5, 'model_str_4': 1, 'clip_str_4': 1, 'lora_name_5': 'None', 'lora_wt_5': 1, 'model_str_5': 1, 'clip_str_5': 1, 'lora_name_6': 'faces_v3.safetensors', 'lora_wt_6': 1, 'model_str_6': 1, 'clip_str_6': 1, 'lora_name_7': 'None', 'lora_wt_7': 1, 'model_str_7': 1, 'clip_str_7': 1, 'lora_name_8': 'None', 'lora_wt_8': 1, 'model_str_8': 1, 'clip_str_8': 1, 'lora_name_9': 'None', 'lora_wt_9': 1, 'model_str_9': 1, 'clip_str_9': 1, 'lora_name_10': 'None', 'lora_wt_10': 1, 'model_str_10': 1, 'clip_str_10': 1, 'lora_name_11': 'None', 'lora_wt_11': 1, 'model_str_11': 1, 'clip_str_11': 1, 'lora_name_12': 'None', 'lora_wt_12': 1, 'model_str_12': 1, 'clip_str_12': 1, 'lora_name_13': 'None', 'lora_wt_13': 1, 'model_str_13': 1, 'clip_str_13': 1, 'lora_name_14': 'None', 'lora_wt_14': 1, 'model_str_14': 1, 'clip_str_14': 1, 'lora_name_15': 'None', 'lora_wt_15': 1, 'model_str_15': 1, 'clip_str_15': 1, 'lora_name_16': 'None', 'lora_wt_16': 1, 'model_str_16': 1, 'clip_str_16': 1, 'lora_name_17': 'None', 'lora_wt_17': 1, 'model_str_17': 1, 'clip_str_17': 1, 'lora_name_18': 'None', 'lora_wt_18': 1, 'model_str_18': 1, 'clip_str_18': 1, 'lora_name_19': 'None', 'lora_wt_19': 1, 'model_str_19': 1, 'clip_str_19': 1, 'lora_name_20': 'None', 'lora_wt_20': 1, 'model_str_20': 1, 'clip_str_20': 1, 'lora_name_21': 'None', 'lora_wt_21': 1, 'model_str_21': 1, 'clip_str_21': 1, 'lora_name_22': 'None', 'lora_wt_22': 1, 'model_str_22': 1, 'clip_str_22': 1, 'lora_name_23': 'None', 'lora_wt_23': 1, 'model_str_23': 1, 'clip_str_23': 1, 'lora_name_24': 'None', 'lora_wt_24': 1, 'model_str_24': 1, 
'clip_str_24': 1, 'lora_name_25': 'None', 'lora_wt_25': 1, 'model_str_25': 1, 'clip_str_25': 1, 'lora_name_26': 'None', 'lora_wt_26': 1, 'model_str_26': 1, 'clip_str_26': 1, 'lora_name_27': 'None', 'lora_wt_27': 1, 'model_str_27': 1, 'clip_str_27': 1, 'lora_name_28': 'None', 'lora_wt_28': 1, 'model_str_28': 1, 'clip_str_28': 1, 'lora_name_29': 'None', 'lora_wt_29': 1, 'model_str_29': 1, 'clip_str_29': 1, 'lora_name_30': 'None', 'lora_wt_30': 1, 'model_str_30': 1, 'clip_str_30': 1, 'lora_name_31': 'None', 'lora_wt_31': 1, 'model_str_31': 1, 'clip_str_31': 1, 'lora_name_32': 'None', 'lora_wt_32': 1, 'model_str_32': 1, 'clip_str_32': 1, 'lora_name_33': 'None', 'lora_wt_33': 1, 'model_str_33': 1, 'clip_str_33': 1, 'lora_name_34': 'None', 'lora_wt_34': 1, 'model_str_34': 1, 'clip_str_34': 1, 'lora_name_35': 'None', 'lora_wt_35': 1, 'model_str_35': 1, 'clip_str_35': 1, 'lora_name_36': 'None', 'lora_wt_36': 1, 'model_str_36': 1, 'clip_str_36': 1, 'lora_name_37': 'None', 'lora_wt_37': 1, 'model_str_37': 1, 'clip_str_37': 1, 'lora_name_38': 'None', 'lora_wt_38': 1, 'model_str_38': 1, 'clip_str_38': 1, 'lora_name_39': 'None', 'lora_wt_39': 1, 'model_str_39': 1, 'clip_str_39': 1, 'lora_name_40': 'None', 'lora_wt_40': 1, 'model_str_40': 1, 'clip_str_40': 1, 'lora_name_41': 'None', 'lora_wt_41': 1, 'model_str_41': 1, 'clip_str_41': 1, 'lora_name_42': 'None', 'lora_wt_42': 1, 'model_str_42': 1, 'clip_str_42': 1, 'lora_name_43': 'None', 'lora_wt_43': 1, 'model_str_43': 1, 'clip_str_43': 1, 'lora_name_44': 'None', 'lora_wt_44': 1, 'model_str_44': 1, 'clip_str_44': 1, 'lora_name_45': 'None', 'lora_wt_45': 1, 'model_str_45': 1, 'clip_str_45': 1, 'lora_name_46': 'None', 'lora_wt_46': 1, 'model_str_46': 1, 'clip_str_46': 1, 'lora_name_47': 'None', 'lora_wt_47': 1, 'model_str_47': 1, 'clip_str_47': 1, 'lora_name_48': 'None', 'lora_wt_48': 1, 'model_str_48': 1, 'clip_str_48': 1, 'lora_name_49': 'None', 'lora_wt_49': 1, 'model_str_49': 1, 'clip_str_49': 1}, 'class_type': 'LoRA Stacker', 
'_meta': {'title': 'LoRA Stacker'}}, '223': {'inputs': {'model': ['250', 0], 'clip': ['52', 5], 'lora_stack': ['222', 0]}, 'class_type': 'CR Apply LoRA Stack', '_meta': {'title': '💊 CR Apply LoRA Stack'}}, '250': {'inputs': {'downsample_factor_depth_1': 2, 'downsample_factor_depth_2': 1, 'model': ['76', 0]}, 'class_type': 'ToDoPatchModel', '_meta': {'title': 'ToDo: Token Dowsampling'}}, '268': {'inputs': {'model_name': 'bbox/hand_yolov8s.pt'}, 'class_type': 'UltralyticsDetectorProvider', '_meta': {'title': 'UltralyticsDetectorProvider'}}, '269': {'inputs': {'bbox_threshold': 0.4, 'bbox_dilation': 0, 'crop_factor': 3, 'drop_size': 10, 'sub_threshold': 0.5, 'sub_dilation': 0, 'sub_bbox_expansion': 0, 'sam_mask_hint_threshold': 0.7, 'post_dilation': 0, 'bbox_detector': ['268', 0], 'image': ['161', 0]}, 'class_type': 'ImpactSimpleDetectorSEGS', '_meta': {'title': 'Simple Detector (SEGS)'}}, '271': {'inputs': {'segs': ['269', 0]}, 'class_type': 'SegsToCombinedMask', '_meta': {'title': 'SEGS to MASK (combined)'}}, '272': {'inputs': {'masks': ['271', 0]}, 'class_type': 'Convert Masks to Images', '_meta': {'title': 'Convert Masks to Images'}}, '276': {'inputs': {'text': 'ultra detailed, intricate details, cinematic, masterpiece, 8k, 4k, UHD', 'clip': ['180', 1]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '277': {'inputs': {'text': 'ugly, deformed, embedding:unaestheticXL_Alb2, ', 'clip': ['180', 1]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '280': {'inputs': {'guide_size': 384, 'guide_size_for': True, 'max_size': 1024, 'seed': 393669993784450, 'steps': 12, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'normal', 'denoise': 0.5, 'feather': 5, 'noise_mask': True, 'force_inpaint': True, 'wildcard': '<lora:XL_fix_hands:1.5>, <lora:DetailedEyes_V3:1.0>', 'cycle': 1, 'inpaint_model': False, 'noise_mask_feather': 20, 'image': ['161', 0], 'segs': ['269', 0], 'model': ['180', 0], 'clip': ['180', 1], 
'vae': ['180', 2], 'positive': ['276', 0], 'negative': ['277', 0]}, 'class_type': 'DetailerForEachDebug', '_meta': {'title': 'DetailerDebug (SEGS)'}}, '288': {'inputs': {'guide_size': 384, 'guide_size_for': True, 'max_size': 1024, 'seed': 500442845647801, 'steps': 12, 'cfg': 2, 'sampler_name': 'euler', 'scheduler': 'normal', 'denoise': 0.5, 'feather': 5, 'noise_mask': True, 'force_inpaint': True, 'wildcard': '<lora:XL_fix_hands:1.5>, <lora:DetailedEyes_V3:1.0>', 'cycle': 1, 'inpaint_model': False, 'noise_mask_feather': 20, 'image': ['280', 0], 'segs': ['269', 0], 'model': ['180', 0], 'clip': ['180', 1], 'vae': ['180', 2], 'positive': ['276', 0], 'negative': ['277', 0]}, 'class_type': 'DetailerForEachDebug', '_meta': {'title': 'DetailerDebug (SEGS)'}}, '291': {'inputs': {'facedetection': 'retinaface_resnet50', 'model': 'GFPGANv1.4.pth', 'visibility': 1, 'codeformer_weight': 0.5, 'image': ['288', 0]}, 'class_type': 'ReActorRestoreFace', '_meta': {'title': 'Restore Face'}}, '295': {'inputs': {'text': ['58', 0]}, 'class_type': 'ShowText|pysssss', '_meta': {'title': 'Show Text 🐍'}}, '359': {'inputs': {'model_name': '4x_UniversalUpscalerV2-Sharp_101000_G.pth'}, 'class_type': 'UpscaleModelLoader', '_meta': {'title': 'Load Upscale Model'}}, '360': {'inputs': {'upscale_model': ['359', 0], 'image': ['367', 0]}, 'class_type': 'ImageUpscaleWithModel', '_meta': {'title': 'Upscale Image (using Model)'}}, '362': {'inputs': {'images': ['360', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '367': {'inputs': {'size': 1024, 'mode': True, 'images': ['291', 0]}, 'class_type': 'ImageScaleDownToSize', '_meta': {'title': 'Scale Down To Size'}}} got prompt [rgthree] Using rgthree's optimized recursive execution. Error: extra_pnginfo[0] is not a dict or missing 'workflow' key Requested to load SDXLClipModel Loading 1 new model Requested to load SDXLClipModel Loading 1 new model ---------------------------------------- Eff. 
Loader SDXL Models Cache: Ckpt: [1] RealVisXL_V4.0 Refn: [1] ProteusV0.5.2 Requested to load SDXL Loading 1 new model 0%| | 0/12 [00:00<?, ?it/s] 8%|▊ | 1/12 [00:00<00:02, 4.91it/s] 17%|█▋ | 2/12 [00:00<00:02, 4.69it/s] 25%|██▌ | 3/12 [00:00<00:01, 4.85it/s] 33%|███▎ | 4/12 [00:00<00:01, 4.94it/s] 42%|████▏ | 5/12 [00:01<00:01, 4.98it/s] 50%|█████ | 6/12 [00:01<00:01, 4.96it/s] 58%|█████▊ | 7/12 [00:01<00:01, 4.83it/s] 67%|██████▋ | 8/12 [00:01<00:00, 4.89it/s] 75%|███████▌ | 9/12 [00:01<00:00, 4.97it/s] 83%|████████▎ | 10/12 [00:02<00:00, 5.00it/s] 92%|█████████▏| 11/12 [00:02<00:00, 5.03it/s] 100%|██████████| 12/12 [00:02<00:00, 4.93it/s] 100%|██████████| 12/12 [00:02<00:00, 4.93it/s] Requested to load SDXL Loading 1 new model 0%| | 0/8 [00:00<?, ?it/s] 12%|█▎ | 1/8 [00:00<00:01, 3.76it/s] 25%|██▌ | 2/8 [00:00<00:01, 3.87it/s] 38%|███▊ | 3/8 [00:00<00:01, 4.03it/s] 50%|█████ | 4/8 [00:00<00:00, 4.10it/s] 62%|██████▎ | 5/8 [00:01<00:00, 4.17it/s] 75%|███████▌ | 6/8 [00:01<00:00, 4.38it/s] 88%|████████▊ | 7/8 [00:01<00:00, 4.54it/s] 100%|██████████| 8/8 [00:01<00:00, 4.67it/s] 100%|██████████| 8/8 [00:01<00:00, 4.36it/s] 0: 640x640 (no detections), 12.0ms Speed: 3.7ms preprocess, 12.0ms inference, 1.1ms postprocess per image at shape (1, 3, 640, 640) Requested to load SDXLClipModel Loading 1 new model [ReActor] 14:23:10 - STATUS - Restoring with GFPGANv1.4.pth Prompt executed in 25.06 seconds node output: {'text': ['fashion editorial style (abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65 . 
high fashion, trendy, stylish, editorial, magazine style, professional, highly detailed']} node output: {'images': [{'filename': 'ComfyUI_temp_pxcuf_00002_.png', 'subfolder': '', 'type': 'temp'}]} temp node output: {'text': ['fashion editorial style (abstract:1.8) photo of photo of a beautiful woman age of 40, with long curled red hair, (smiling, soft smile, little smile:1.1), wearing a casual dress, walking, (fashion sunglasses:1.5), in the city, street, traffic lights in background, lights background, sunny, intense sunlight, posed in a city park in autumn with fallen leaves in background, close up on face of, bounced lighting, low angle, shot from below, shot on ALEXA 65 . high fashion, trendy, stylish, editorial, magazine style, professional, highly detailed']} node output: {'images': [{'filename': 'ComfyUI_temp_pxcuf_00002_.png', 'subfolder': '', 'type': 'temp'}]} temp
Prediction
growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0IDnvp3slrbbxb6atcoa3l7unukwiStatusSucceededSourceWebHardwareA100 (40GB)Total durationCreatedInput
- prompt
- photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face
- resolution
- 1024 x 1024 (square)
{ "prompt": "photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face", "resolution": "1024 x 1024 (square)" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; import fs from "node:fs"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", { input: { prompt: "photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face", resolution: "1024 x 1024 (square)" } } ); // To access the file URL: console.log(output.url()); //=> "http://example.com" // To write the file to disk: fs.writeFile("my-image.png", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", input={ "prompt": "photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face", "resolution": "1024 x 1024 (square)" } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Run growthmkt/prompt using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "growthmkt/prompt:3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0", "input": { "prompt": "photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face", "resolution": "1024 x 1024 (square)" } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-03-20T21:11:02.336883Z", "created_at": "2024-03-20T21:10:57.509605Z", "data_removed": false, "error": null, "id": "nvp3slrbbxb6atcoa3l7unukwi", "input": { "prompt": "photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face", "resolution": "1024 x 1024 (square)" }, "logs": "Starting...\nNo seed - create brand new 6361503\nPrompting {'33': {'inputs': {'samples': ['38', 0], 'vae': ['37', 2]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '34': {'inputs': {'images': ['33', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '37': {'inputs': {'ckpt_name': 'Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors'}, 'class_type': 'CheckpointLoaderSimple', '_meta': {'title': 'Load Checkpoint'}}, '38': {'inputs': {'seed': 6361503, 'steps': 35, 'cfg': 7, 'sampler_name': 'dpmpp_2m_sde_gpu', 'scheduler': 'karras', 'denoise': 1, 'model': ['37', 0], 'positive': ['39', 0], 'negative': ['40', 0], 'latent_image': ['41', 0]}, 'class_type': 'KSampler', '_meta': {'title': 'KSampler'}}, '39': {'inputs': {'text': 'photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '40': {'inputs': {'text': '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated 
malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '41': {'inputs': {'dimensions': '1024 x 1024 (square)', 'clip_scale': 2, 'batch_size': 1}, 'class_type': 'SDXL Empty Latent Image (rgthree)', '_meta': {'title': 'SDXL Empty Latent Image (rgthree)'}}, '42': {'inputs': {'stop_at_clip_layer': -1, 'clip': ['37', 1]}, 'class_type': 'CLIPSetLastLayer', '_meta': {'title': 'CLIP Set Last Layer'}}}\ngot prompt\n\u001b[32m[rgthree] Using rgthree's optimized recursive execution.\u001b[0m\n 0%| | 0/35 [00:00<?, ?it/s]\n 3%|▎ | 1/35 [00:00<00:04, 7.23it/s]\n 6%|▌ | 2/35 [00:00<00:04, 8.00it/s]\n 9%|▊ | 3/35 [00:00<00:03, 8.30it/s]\n 11%|█▏ | 4/35 [00:00<00:03, 8.50it/s]\n 14%|█▍ | 5/35 [00:00<00:03, 8.59it/s]\n 17%|█▋ | 6/35 [00:00<00:03, 8.53it/s]\n 20%|██ | 7/35 [00:00<00:03, 8.61it/s]\n 23%|██▎ | 8/35 [00:00<00:03, 8.64it/s]\n 26%|██▌ | 9/35 [00:01<00:02, 8.67it/s]\n 29%|██▊ | 10/35 [00:01<00:02, 8.68it/s]\n 31%|███▏ | 11/35 [00:01<00:02, 8.68it/s]\n 34%|███▍ | 12/35 [00:01<00:02, 8.71it/s]\n 37%|███▋ | 13/35 [00:01<00:02, 8.69it/s]\n 40%|████ | 14/35 [00:01<00:02, 8.67it/s]\n 43%|████▎ | 15/35 [00:01<00:02, 8.66it/s]\n 46%|████▌ | 16/35 [00:01<00:02, 8.64it/s]\n 49%|████▊ | 17/35 [00:01<00:02, 8.67it/s]\n 51%|█████▏ | 18/35 [00:02<00:01, 8.68it/s]\n 54%|█████▍ | 19/35 [00:02<00:01, 8.72it/s]\n 57%|█████▋ | 20/35 [00:02<00:01, 8.72it/s]\n 60%|██████ | 21/35 [00:02<00:01, 8.75it/s]\n 63%|██████▎ | 22/35 [00:02<00:01, 8.77it/s]\n 66%|██████▌ | 23/35 [00:02<00:01, 8.75it/s]\n 69%|██████▊ | 24/35 [00:02<00:01, 8.76it/s]\n 71%|███████▏ | 25/35 
[00:02<00:01, 8.63it/s]\n 74%|███████▍ | 26/35 [00:03<00:01, 8.66it/s]\n 77%|███████▋ | 27/35 [00:03<00:00, 8.68it/s]\n 80%|████████ | 28/35 [00:03<00:00, 8.72it/s]\n 83%|████████▎ | 29/35 [00:03<00:00, 8.75it/s]\n 86%|████████▌ | 30/35 [00:03<00:00, 8.78it/s]\n 89%|████████▊ | 31/35 [00:03<00:00, 8.76it/s]\n 91%|█████████▏| 32/35 [00:03<00:00, 8.61it/s]\n 94%|█████████▍| 33/35 [00:03<00:00, 8.69it/s]\n 97%|█████████▋| 34/35 [00:03<00:00, 8.72it/s]\n100%|██████████| 35/35 [00:04<00:00, 8.92it/s]\n100%|██████████| 35/35 [00:04<00:00, 8.67it/s]\nPrompt executed in 4.43 seconds\nnode output: {'images': [{'filename': 'ComfyUI_temp_rprkl_00002_.png', 'subfolder': '', 'type': 'temp'}]}\ntemp", "metrics": { "predict_time": 4.813904, "total_time": 4.827278 }, "output": "https://replicate.delivery/pbxt/48nPiBu1eaQwViPSNDX1OCu7nRH9aYrtumsT9jp2ZnKzFIRJA/out-34.png", "started_at": "2024-03-20T21:10:57.522979Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/nvp3slrbbxb6atcoa3l7unukwi", "cancel": "https://api.replicate.com/v1/predictions/nvp3slrbbxb6atcoa3l7unukwi/cancel" }, "version": "3f980fffdb4caa407d02ae5c6efdd86698a43b2c6bf9817deb75c75842db06b0" }
Generated inStarting... No seed - create brand new 6361503 Prompting {'33': {'inputs': {'samples': ['38', 0], 'vae': ['37', 2]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '34': {'inputs': {'images': ['33', 0]}, 'class_type': 'PreviewImage', '_meta': {'title': 'Preview Image'}}, '37': {'inputs': {'ckpt_name': 'Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors'}, 'class_type': 'CheckpointLoaderSimple', '_meta': {'title': 'Load Checkpoint'}}, '38': {'inputs': {'seed': 6361503, 'steps': 35, 'cfg': 7, 'sampler_name': 'dpmpp_2m_sde_gpu', 'scheduler': 'karras', 'denoise': 1, 'model': ['37', 0], 'positive': ['39', 0], 'negative': ['40', 0], 'latent_image': ['41', 0]}, 'class_type': 'KSampler', '_meta': {'title': 'KSampler'}}, '39': {'inputs': {'text': 'photo of an Indian woman, with a rich, dark, braided hair, practicing yoga, full body framing, in a serene outdoor setting, under bounced light, sharp focus, photo by greg rutkowski, soft lighting, vibrant colors, masterpiece, ((streets)), detailed face', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Prompt)'}}, '40': {'inputs': {'text': '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', 'clip': ['42', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode 
(Prompt)'}}, '41': {'inputs': {'dimensions': '1024 x 1024 (square)', 'clip_scale': 2, 'batch_size': 1}, 'class_type': 'SDXL Empty Latent Image (rgthree)', '_meta': {'title': 'SDXL Empty Latent Image (rgthree)'}}, '42': {'inputs': {'stop_at_clip_layer': -1, 'clip': ['37', 1]}, 'class_type': 'CLIPSetLastLayer', '_meta': {'title': 'CLIP Set Last Layer'}}} got prompt [rgthree] Using rgthree's optimized recursive execution. 0%| | 0/35 [00:00<?, ?it/s] 3%|▎ | 1/35 [00:00<00:04, 7.23it/s] 6%|▌ | 2/35 [00:00<00:04, 8.00it/s] 9%|▊ | 3/35 [00:00<00:03, 8.30it/s] 11%|█▏ | 4/35 [00:00<00:03, 8.50it/s] 14%|█▍ | 5/35 [00:00<00:03, 8.59it/s] 17%|█▋ | 6/35 [00:00<00:03, 8.53it/s] 20%|██ | 7/35 [00:00<00:03, 8.61it/s] 23%|██▎ | 8/35 [00:00<00:03, 8.64it/s] 26%|██▌ | 9/35 [00:01<00:02, 8.67it/s] 29%|██▊ | 10/35 [00:01<00:02, 8.68it/s] 31%|███▏ | 11/35 [00:01<00:02, 8.68it/s] 34%|███▍ | 12/35 [00:01<00:02, 8.71it/s] 37%|███▋ | 13/35 [00:01<00:02, 8.69it/s] 40%|████ | 14/35 [00:01<00:02, 8.67it/s] 43%|████▎ | 15/35 [00:01<00:02, 8.66it/s] 46%|████▌ | 16/35 [00:01<00:02, 8.64it/s] 49%|████▊ | 17/35 [00:01<00:02, 8.67it/s] 51%|█████▏ | 18/35 [00:02<00:01, 8.68it/s] 54%|█████▍ | 19/35 [00:02<00:01, 8.72it/s] 57%|█████▋ | 20/35 [00:02<00:01, 8.72it/s] 60%|██████ | 21/35 [00:02<00:01, 8.75it/s] 63%|██████▎ | 22/35 [00:02<00:01, 8.77it/s] 66%|██████▌ | 23/35 [00:02<00:01, 8.75it/s] 69%|██████▊ | 24/35 [00:02<00:01, 8.76it/s] 71%|███████▏ | 25/35 [00:02<00:01, 8.63it/s] 74%|███████▍ | 26/35 [00:03<00:01, 8.66it/s] 77%|███████▋ | 27/35 [00:03<00:00, 8.68it/s] 80%|████████ | 28/35 [00:03<00:00, 8.72it/s] 83%|████████▎ | 29/35 [00:03<00:00, 8.75it/s] 86%|████████▌ | 30/35 [00:03<00:00, 8.78it/s] 89%|████████▊ | 31/35 [00:03<00:00, 8.76it/s] 91%|█████████▏| 32/35 [00:03<00:00, 8.61it/s] 94%|█████████▍| 33/35 [00:03<00:00, 8.69it/s] 97%|█████████▋| 34/35 [00:03<00:00, 8.72it/s] 100%|██████████| 35/35 [00:04<00:00, 8.92it/s] 100%|██████████| 35/35 [00:04<00:00, 8.67it/s] Prompt executed in 4.43 
seconds node output: {'images': [{'filename': 'ComfyUI_temp_rprkl_00002_.png', 'subfolder': '', 'type': 'temp'}]} temp
Want to make some of these yourself?
Run this model