fofr / style-transfer
Transfer the style of one image to another
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs/promises";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/style-transfer using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fofr/style-transfer:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0",
{
input: {
model: "fast",
width: 1024,
height: 1024,
prompt: "An astronaut riding a unicorn",
style_image: "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
output_format: "webp",
output_quality: 80,
negative_prompt: "",
number_of_images: 1,
structure_depth_strength: 1,
structure_denoising_strength: 0.65
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
await fs.writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/style-transfer using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fofr/style-transfer:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0",
input={
"model": "fast",
"width": 1024,
"height": 1024,
"prompt": "An astronaut riding a unicorn",
"style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"number_of_images": 1,
"structure_depth_strength": 1,
"structure_denoising_strength": 0.65
}
)
print(output)
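The output is a list with one item per generated image. To save the files rather than just print their URLs, you can iterate over it. A minimal sketch, assuming a recent version of the replicate Python client (1.0 or later), where run() returns FileOutput objects with a url attribute and a read() method:
# Save each generated image to disk (a sketch; assumes replicate>=1.0,
# where run() returns FileOutput objects rather than plain URL strings).
for i, image in enumerate(output):
    print(image.url)  # the hosted URL for this image
    with open(f"output-{i}.webp", "wb") as f:  # output_format above is webp
        f.write(image.read())  # download and write the raw bytes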
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/style-transfer using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "fofr/style-transfer:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0",
"input": {
"model": "fast",
"width": 1024,
"height": 1024,
"prompt": "An astronaut riding a unicorn",
"style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"number_of_images": 1,
"structure_depth_strength": 1,
"structure_denoising_strength": 0.65
}
}' \
https://api.replicate.com/v1/predictions
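The Prefer: wait header asks the API to hold the connection open until the prediction finishes, but long runs can still return while the prediction is in a starting or processing state. In that case, poll the urls.get endpoint included in the response until it reaches a terminal status. A minimal polling sketch in Python using requests (the field names match the prediction JSON shown under Output below; the one-second interval is an arbitrary choice):
import os
import time
import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction (same payload as the curl example above,
# trimmed to a few inputs).
prediction = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers=headers,
    json={
        "version": "fofr/style-transfer:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0",
        "input": {
            "model": "fast",
            "prompt": "An astronaut riding a unicorn",
            "style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
        },
    },
).json()

# Poll the "get" URL until the prediction settles.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["output"])  # e.g. a list containing one image URL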
To learn more, take a look at Replicate’s HTTP API reference docs.
Install Cog:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fofr/style-transfer@sha256:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0 \
-i 'model="fast"' \
-i 'width=1024' \
-i 'height=1024' \
-i 'prompt="An astronaut riding a unicorn"' \
-i 'style_image="https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg"' \
-i 'output_format="webp"' \
-i 'output_quality=80' \
-i 'negative_prompt=""' \
-i 'number_of_images=1' \
-i 'structure_depth_strength=1' \
-i 'structure_denoising_strength=0.65'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/style-transfer@sha256:f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"model": "fast",
"width": 1024,
"height": 1024,
"prompt": "An astronaut riding a unicorn",
"style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"number_of_images": 1,
"structure_depth_strength": 1,
"structure_denoising_strength": 0.65
}
}' \
http://localhost:5000/predictions
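You can also call the local container from code instead of curl. A small sketch in Python using requests, assuming the container started by the docker run command above is listening on port 5000; Cog's HTTP server typically returns file outputs inline as base64 data URIs:
import requests

# A sketch: send a prediction request to the local Cog server.
resp = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "model": "fast",
            "prompt": "An astronaut riding a unicorn",
            "style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
        }
    },
)
result = resp.json()
print(result["status"])  # "succeeded" when the prediction completed
print(result["output"])  # typically a list of base64 data URIs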
To learn more, take a look at the Cog documentation.
Output
{
"completed_at": "2024-11-28T11:15:29.481256Z",
"created_at": "2024-11-28T11:15:23.572000Z",
"data_removed": false,
"error": null,
"id": "t71z6mcv6hrma0cke9xrtyzm1w",
"input": {
"model": "fast",
"width": 1024,
"height": 1024,
"prompt": "An astronaut riding a unicorn",
"style_image": "https://replicate.delivery/pbxt/KlTqluRakBzt7N5mm1WExEQCc4J3usa7E3n5dhttcayTqFRm/van-gogh.jpeg",
"output_format": "webp",
"output_quality": 80,
"negative_prompt": "",
"number_of_images": 1,
"structure_depth_strength": 1,
"structure_denoising_strength": 0.65
},
"logs": "Random seed set to: 1640868803\nChecking weights\nIncluding weights for IPAdapter preset: PLUS (high strength)\n✅ ip-adapter-plus_sdxl_vit-h.safetensors\n✅ CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\n✅ dreamshaperXL_lightningDPMSDE.safetensors\n====================================\nRunning workflow\ngot prompt\nExecuting node 2, title: Load Checkpoint, class type: CheckpointLoaderSimple\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']\nloaded straight to GPU\nRequested to load SDXL\nLoading 1 new model\nExecuting node 1, title: IPAdapter Unified Loader, class type: IPAdapterUnifiedLoader\n\u001b[33mINFO: Clip Vision model loaded from /src/ComfyUI/models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\u001b[0m\n\u001b[33mINFO: IPAdapter model loaded from /src/ComfyUI/models/ipadapter/ip-adapter-plus_sdxl_vit-h.safetensors\u001b[0m\nExecuting node 5, title: Load Image, class type: LoadImage\n\u001b[33mINFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.\u001b[0m\nExecuting node 4, title: IPAdapter, class type: IPAdapter\nRequested to load CLIPVisionModelProjection\nLoading 1 new model\nExecuting node 6, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nRequested to load SDXLClipModel\nLoading 1 new model\nExecuting node 7, title: CLIP Text Encode (Prompt), class type: CLIPTextEncode\nExecuting node 10, title: Empty Latent Image, class type: EmptyLatentImage\nExecuting node 3, title: KSampler, class type: KSampler\nRequested to load SDXL\nLoading 1 new model\n 0%| | 0/4 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=14.614644050598145 and t1=14.614643.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n 25%|██▌ | 1/4 [00:00<00:01, 2.99it/s]\n 50%|█████ | 2/4 [00:00<00:00, 3.64it/s]\n 75%|███████▌ | 3/4 [00:00<00:00, 3.94it/s]\n100%|██████████| 4/4 [00:00<00:00, 5.05it/s]\n100%|██████████| 4/4 [00:00<00:00, 4.40it/s]\nRequested to load AutoencoderKL\nLoading 1 new model\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nExecuting node 9, title: Save Image, class type: SaveImage\nPrompt executed in 5.15 seconds\noutputs: {'9': {'images': [{'filename': 'ComfyUI_00001_.png', 'subfolder': '', 'type': 'output'}]}}\n====================================\nComfyUI_00001_.png",
"metrics": {
"predict_time": 5.8975421279999996,
"total_time": 5.909256
},
"output": [
"https://replicate.delivery/xezq/OgPfUK3cTWSYF6ZKr30ItkZClxuuf3INMygNv48O47wRLg1TA/ComfyUI_00001_.webp"
],
"started_at": "2024-11-28T11:15:23.583714Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-ylw2cq4qmhgzi5wukon7sd7n2euktdfpoled6mp6tqvfknt5xnqq",
"get": "https://api.replicate.com/v1/predictions/t71z6mcv6hrma0cke9xrtyzm1w",
"cancel": "https://api.replicate.com/v1/predictions/t71z6mcv6hrma0cke9xrtyzm1w/cancel"
},
"version": "f1023890703bc0a5a3a2c21b5e498833be5f6ef6e70e9daf6b9b3a4fd8309cf0"
}
Run time and cost
This model costs approximately $0.0064 to run on Replicate, or about 156 runs per $1, but this varies depending on your inputs. It is also open source, so you can run it on your own computer with Docker.
This model runs on Nvidia L40S GPU hardware. Predictions typically complete within 7 seconds.
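As a quick sanity check, the runs-per-dollar figure follows directly from the per-run estimate:
cost_per_run = 0.0064           # approximate cost quoted above, in USD
print(round(1 / cost_per_run))  # => 156 runs per $1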