Failed to load versions. Head to the versions page to see all versions for this model.
You're looking at a specific version of this model. Jump to the model overview.
fofr /pulid-lightning:a0321a0c
Input
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/pulid-lightning using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fofr/pulid-lightning:a0321a0cef20aa9e0828f8a9873dc3fd019266bc929f2fb8d70b76d52e122382",
{
input: {
width: 1024,
height: 1024,
prompt: "A photo of a person",
face_image: "https://replicate.delivery/pbxt/Kt8cUyAZgWIqv4GEW38rhHgqhVQp5vKkkvs1xgsaijicnz4b/guy.webp",
face_style: "high-fidelity",
output_format: "webp",
output_quality: 80,
negative_prompt: "",
checkpoint_model: "realistic",
number_of_images: 1
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/pulid-lightning using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fofr/pulid-lightning:a0321a0cef20aa9e0828f8a9873dc3fd019266bc929f2fb8d70b76d52e122382",
input={
"width": 1024,
"height": 1024,
"prompt": "A photo of a person",
"face_image": "https://replicate.delivery/pbxt/Kt8cUyAZgWIqv4GEW38rhHgqhVQp5vKkkvs1xgsaijicnz4b/guy.webp",
"face_style": "high-fidelity",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"checkpoint_model": "realistic",
"number_of_images": 1
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/pulid-lightning using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "a0321a0cef20aa9e0828f8a9873dc3fd019266bc929f2fb8d70b76d52e122382",
"input": {
"width": 1024,
"height": 1024,
"prompt": "A photo of a person",
"face_image": "https://replicate.delivery/pbxt/Kt8cUyAZgWIqv4GEW38rhHgqhVQp5vKkkvs1xgsaijicnz4b/guy.webp",
"face_style": "high-fidelity",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"checkpoint_model": "realistic",
"number_of_images": 1
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy.
Output
{
"completed_at": "2024-05-09T10:21:44.078988Z",
"created_at": "2024-05-09T10:21:35.511000Z",
"data_removed": false,
"error": null,
"id": "89ga3jk5jxrgg0cfbkfvj6hjb0",
"input": {
"width": 1024,
"height": 1024,
"prompt": "A photo of a person",
"face_image": "https://replicate.delivery/pbxt/Kt8cUyAZgWIqv4GEW38rhHgqhVQp5vKkkvs1xgsaijicnz4b/guy.webp",
"face_style": "high-fidelity",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"checkpoint_model": "realistic",
"number_of_images": 1
},
"logs": "Random seed set to: 3375421544\nChecking inputs\n✅ /tmp/inputs/image.png\n====================================\nChecking weights\n✅ ip-adapter_pulid_sdxl_fp16.safetensors\n✅ dreamshaperXL_lightningDPMSDE.safetensors\n✅ models/antelopev2\n✅ Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors\n====================================\nRunning workflow\ngot prompt\nExecuting node 4, title: Load Checkpoint, class type: CheckpointLoaderSimple\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight', 'clip_g.logit_scale']\nloaded straight to GPU\nRequested to load SDXL\nLoading 1 new model\nExecuting node 33, title: Apply Pulid, class type: ApplyPulid\nExecuting node 22, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nRequested to load SDXLClipModel\nLoading 1 new model\nExecuting node 23, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nExecuting node 3, title: KSampler, class type: KSampler\nRequested to load SDXL\nLoading 1 new model\n 0%| | 0/4 [00:00<?, ?it/s]\n 25%|██▌ | 1/4 [00:00<00:01, 1.93it/s]\n 50%|█████ | 2/4 [00:01<00:01, 1.84it/s]\n 75%|███████▌ | 3/4 [00:01<00:00, 1.90it/s]\n100%|██████████| 4/4 [00:01<00:00, 2.41it/s]\n100%|██████████| 4/4 [00:01<00:00, 2.19it/s]\nRequested to load AutoencoderKL\nLoading 1 new model\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nExecuting node 47, title: Save Image, class type: SaveImage\nPrompt executed in 6.29 seconds\noutputs: {'47': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}}\n====================================\nComfyUI_00001_.png",
"metrics": {
"predict_time": 8.531323,
"total_time": 8.567988
},
"output": [
"https://replicate.delivery/pbxt/g2Kybo6t34pCJ5zSewsh6ZiFKHeA23MlbYqmBVpMARy3WlySA/ComfyUI_00001_.webp"
],
"started_at": "2024-05-09T10:21:35.547665Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/89ga3jk5jxrgg0cfbkfvj6hjb0",
"cancel": "https://api.replicate.com/v1/predictions/89ga3jk5jxrgg0cfbkfvj6hjb0/cancel"
},
"version": "a0321a0cef20aa9e0828f8a9873dc3fd019266bc929f2fb8d70b76d52e122382"
}
Random seed set to: 3375421544
Checking inputs
✅ /tmp/inputs/image.png
====================================
Checking weights
✅ ip-adapter_pulid_sdxl_fp16.safetensors
✅ dreamshaperXL_lightningDPMSDE.safetensors
✅ models/antelopev2
✅ Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors
====================================
Running workflow
got prompt
Executing node 4, title: Load Checkpoint, class type: CheckpointLoaderSimple
model_type EPS
Using pytorch attention in VAE
Using pytorch attention in VAE
clip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight', 'clip_g.logit_scale']
loaded straight to GPU
Requested to load SDXL
Loading 1 new model
Executing node 33, title: Apply Pulid, class type: ApplyPulid
Executing node 22, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode
Requested to load SDXLClipModel
Loading 1 new model
Executing node 23, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode
Executing node 3, title: KSampler, class type: KSampler
Requested to load SDXL
Loading 1 new model
0%| | 0/4 [00:00<?, ?it/s]
25%|██▌ | 1/4 [00:00<00:01, 1.93it/s]
50%|█████ | 2/4 [00:01<00:01, 1.84it/s]
75%|███████▌ | 3/4 [00:01<00:00, 1.90it/s]
100%|██████████| 4/4 [00:01<00:00, 2.41it/s]
100%|██████████| 4/4 [00:01<00:00, 2.19it/s]
Requested to load AutoencoderKL
Loading 1 new model
Executing node 8, title: VAE Decode, class type: VAEDecode
Executing node 47, title: Save Image, class type: SaveImage
Prompt executed in 6.29 seconds
outputs: {'47': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}}
====================================
ComfyUI_00001_.png