fofr/ays-text-to-image:a004c3ac
Input
Run this model in Node.js with one line of code.
First, install Replicate's Node.js client library:
npm install replicate
Then, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/ays-text-to-image using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fofr/ays-text-to-image:a004c3ac8f62ac95a90b5a0c264beb47b66a6d1f8141b76fb27cd90e9a8bfe8e",
{
input: {
steps: 10,
width: 1156,
height: 768,
prompt: "a photo of some steps",
checkpoint: "ProteusV0.4.safetensors",
num_outputs: 1,
sampler_name: "euler",
output_format: "webp",
guidance_scale: 7.5,
output_quality: 80,
negative_prompt: ""
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Then, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/ays-text-to-image using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fofr/ays-text-to-image:a004c3ac8f62ac95a90b5a0c264beb47b66a6d1f8141b76fb27cd90e9a8bfe8e",
input={
"steps": 10,
"width": 1156,
"height": 768,
"prompt": "a photo of some steps",
"checkpoint": "ProteusV0.4.safetensors",
"num_outputs": 1,
"sampler_name": "euler",
"output_format": "webp",
"guidance_scale": 7.5,
"output_quality": 80,
"negative_prompt": ""
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
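The output of replicate.run() in this example is a list of image URLs (see the example output below). A minimal sketch, continuing the Python example above and using only the standard library, for saving each image to a local file; the output-N.webp filename pattern is illustrative, not part of the API:
import urllib.request

# `output` is the list of image URLs returned by replicate.run() above.
for i, url in enumerate(output):
    # Save each generated image locally, e.g. output-0.webp (illustrative filename).
    urllib.request.urlretrieve(url, f"output-{i}.webp")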
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/ays-text-to-image using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "a004c3ac8f62ac95a90b5a0c264beb47b66a6d1f8141b76fb27cd90e9a8bfe8e",
"input": {
"steps": 10,
"width": 1156,
"height": 768,
"prompt": "a photo of some steps",
"checkpoint": "ProteusV0.4.safetensors",
"num_outputs": 1,
"sampler_name": "euler",
"output_format": "webp",
"guidance_scale": 7.5,
"output_quality": 80,
"negative_prompt": ""
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
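The Prefer: wait header asks the API to hold the connection open until the prediction finishes (or times out). For longer-running predictions you can omit that header and instead poll the URL returned in the response's urls.get field. A minimal Python sketch of that polling pattern, assuming the requests library is installed; the prediction ID is taken from the example output below:
import os
import time
import requests

# The "id" field from the prediction creation response (example value from the output below).
prediction_id = "5enrvraw95rgm0cf5veva7rggm"
headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}
get_url = f"https://api.replicate.com/v1/predictions/{prediction_id}"

# Poll until the prediction reaches a terminal state.
while True:
    prediction = requests.get(get_url, headers=headers).json()
    if prediction["status"] in ("succeeded", "failed", "canceled"):
        break
    time.sleep(1)

print(prediction["status"], prediction.get("output"))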
Output
{
"completed_at": "2024-04-30T11:57:02.551140Z",
"created_at": "2024-04-30T11:56:50.121000Z",
"data_removed": false,
"error": null,
"id": "5enrvraw95rgm0cf5veva7rggm",
"input": {
"steps": 10,
"width": 1156,
"height": 768,
"prompt": "a photo of some steps",
"checkpoint": "ProteusV0.4.safetensors",
"num_outputs": 1,
"sampler_name": "euler",
"output_format": "webp",
"guidance_scale": 7.5,
"output_quality": 80,
"negative_prompt": ""
},
"logs": "Random seed set to: 2615771573\nChecking inputs\n====================================\nChecking weights\n⏳ Downloading ProteusV0.4.safetensors to ComfyUI/models/checkpoints\n⌛️ Downloaded ProteusV0.4.safetensors in 7.46s, size: 6617.77MB\n✅ ProteusV0.4.safetensors\n====================================\nRunning workflow\ngot prompt\nExecuting node 4, title: Load Checkpoint, class type: CheckpointLoaderSimple\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']\nloaded straight to GPU\nRequested to load SDXL\nLoading 1 new model\nExecuting node 6, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nRequested to load SDXLClipModel\nLoading 1 new model\nExecuting node 19, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nExecuting node 21, title: SamplerCustom, class type: SamplerCustom\n 0%| | 0/10 [00:00<?, ?it/s]\n 20%|██ | 2/10 [00:00<00:00, 8.88it/s]\n 30%|███ | 3/10 [00:00<00:00, 7.63it/s]\n 40%|████ | 4/10 [00:00<00:00, 7.13it/s]\n 50%|█████ | 5/10 [00:00<00:00, 6.87it/s]\n 60%|██████ | 6/10 [00:00<00:00, 6.72it/s]\n 70%|███████ | 7/10 [00:01<00:00, 6.59it/s]\n 80%|████████ | 8/10 [00:01<00:00, 6.53it/s]\n 90%|█████████ | 9/10 [00:01<00:00, 6.49it/s]\n100%|██████████| 10/10 [00:01<00:00, 6.47it/s]\n100%|██████████| 10/10 [00:01<00:00, 6.78it/s]\nRequested to load AutoencoderKL\nLoading 1 new model\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nExecuting node 9, title: Save Image, class type: SaveImage\nPrompt executed in 4.16 seconds\noutputs: {'9': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}}\n====================================\nContents of /tmp/outputs:\nComfyUI_00001_.png",
"metrics": {
"predict_time": 12.400336,
"total_time": 12.43014
},
"output": [
"https://replicate.delivery/pbxt/HV02s0HiY9JfJCwavsHeeeypu1rphkf56lpi2HqvVtftjO6rE/ComfyUI_00001_.webp"
],
"started_at": "2024-04-30T11:56:50.150804Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/5enrvraw95rgm0cf5veva7rggm",
"cancel": "https://api.replicate.com/v1/predictions/5enrvraw95rgm0cf5veva7rggm/cancel"
},
"version": "a004c3ac8f62ac95a90b5a0c264beb47b66a6d1f8141b76fb27cd90e9a8bfe8e"
}
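The fields you typically consume from a prediction object like the one above are status, output (a list of generated image URLs), and metrics. A minimal Python sketch that reads those fields, assuming the JSON above has been saved to a hypothetical prediction.json file:
import json

# Assumes the prediction JSON above was saved to prediction.json (hypothetical filename).
with open("prediction.json") as f:
    prediction = json.load(f)

if prediction["status"] == "succeeded":
    for image_url in prediction["output"]:  # list of generated image URLs
        print(image_url)
    print(f"predict_time: {prediction['metrics']['predict_time']:.2f}s")
else:
    print(prediction["error"])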