Readme
This model doesn't have a readme.
Flux fine-tuned to write and draw in condensation
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/flux-condensation using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fofr/flux-condensation:7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d",
{
input: {
model: "dev",
prompt: "the words \"condensation lora\" written in CONDENSATION on a window",
go_fast: false,
lora_scale: 1,
megapixels: "1",
num_outputs: 1,
aspect_ratio: "3:2",
output_format: "jpg",
guidance_scale: 3,
output_quality: 80,
prompt_strength: 0.8,
extra_lora_scale: 1,
num_inference_steps: 28
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/flux-condensation using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fofr/flux-condensation:7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d",
input={
"model": "dev",
"prompt": "the words \"condensation lora\" written in CONDENSATION on a window",
"go_fast": False,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "3:2",
"output_format": "jpg",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/flux-condensation using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "fofr/flux-condensation:7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d",
"input": {
"model": "dev",
"prompt": "the words \\"condensation lora\\" written in CONDENSATION on a window",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "3:2",
"output_format": "jpg",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fofr/flux-condensation@sha256:7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d \
-i 'model="dev"' \
-i $'prompt="the words \\"condensation lora\\" written in CONDENSATION on a window"' \
-i 'go_fast=false' \
-i 'lora_scale=1' \
-i 'megapixels="1"' \
-i 'num_outputs=1' \
-i 'aspect_ratio="3:2"' \
-i 'output_format="jpg"' \
-i 'guidance_scale=3' \
-i 'output_quality=80' \
-i 'prompt_strength=0.8' \
-i 'extra_lora_scale=1' \
-i 'num_inference_steps=28'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/flux-condensation@sha256:7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "model": "dev", "prompt": "the words \\"condensation lora\\" written in CONDENSATION on a window", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "3:2", "output_format": "jpg", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
Each run costs approximately $0.0062. Alternatively, try out our featured models for free.
By signing in, you agree to our
terms of service and privacy policy.
{
"completed_at": "2024-12-14T14:36:06.190994Z",
"created_at": "2024-12-14T14:35:57.714000Z",
"data_removed": false,
"error": null,
"id": "w2y2ep7vj9rma0ckrpca9y023c",
"input": {
"model": "dev",
"prompt": "the words \"condensation lora\" written in CONDENSATION on a window",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "3:2",
"output_format": "jpg",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
},
"logs": "2024-12-14 14:35:57.841 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-14 14:35:57.842 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.72it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.54it/s]\n2024-12-14 14:35:57.958 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s\nfree=28809760784384\nDownloading weights\n2024-12-14T14:35:57Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmparzl_chr/weights url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar\n2024-12-14T14:36:00Z | INFO | [ Complete ] dest=/tmp/tmparzl_chr/weights size=\"172 MB\" total_elapsed=2.339s url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar\nDownloaded weights in 2.36s\n2024-12-14 14:36:00.325 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f631c9d78c385d83\n2024-12-14 14:36:00.398 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-14 14:36:00.398 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-14 14:36:00.399 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.16it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.01it/s]\n2024-12-14 14:36:00.514 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.19s\nUsing seed: 47170\n0it [00:00, ?it/s]\n1it [00:00, 8.95it/s]\n2it [00:00, 6.24it/s]\n3it [00:00, 5.68it/s]\n4it [00:00, 5.45it/s]\n5it [00:00, 5.35it/s]\n6it [00:01, 5.25it/s]\n7it [00:01, 5.20it/s]\n8it [00:01, 5.18it/s]\n9it [00:01, 5.16it/s]\n10it [00:01, 5.16it/s]\n11it [00:02, 5.14it/s]\n12it [00:02, 5.14it/s]\n13it [00:02, 5.14it/s]\n14it 
[00:02, 5.14it/s]\n15it [00:02, 5.14it/s]\n16it [00:03, 5.13it/s]\n17it [00:03, 5.12it/s]\n18it [00:03, 5.14it/s]\n19it [00:03, 5.13it/s]\n20it [00:03, 5.13it/s]\n21it [00:04, 5.12it/s]\n22it [00:04, 5.11it/s]\n23it [00:04, 5.12it/s]\n24it [00:04, 5.12it/s]\n25it [00:04, 5.12it/s]\n26it [00:04, 5.10it/s]\n27it [00:05, 5.10it/s]\n28it [00:05, 5.09it/s]\n28it [00:05, 5.20it/s]\nTotal safe images: 1 out of 1",
"metrics": {
"predict_time": 8.348703184,
"total_time": 8.476994
},
"output": [
"https://replicate.delivery/xezq/XjmWAF5foej9wUWtOADFE6QF9dPkq7Xuert6yVvge6nbdSrPB/out-0.jpg"
],
"started_at": "2024-12-14T14:35:57.842290Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-u7imixo7iydknoog5czl5v6ezniiuppurz7kuvz2qus4xdwk2bsa",
"get": "https://api.replicate.com/v1/predictions/w2y2ep7vj9rma0ckrpca9y023c",
"cancel": "https://api.replicate.com/v1/predictions/w2y2ep7vj9rma0ckrpca9y023c/cancel"
},
"version": "7d174a0ee7e9769758117762f7069646b3478e09cca605c2c2284b349af84f2d"
}
2024-12-14 14:35:57.841 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2024-12-14 14:35:57.842 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.72it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.54it/s]
2024-12-14 14:35:57.958 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s
free=28809760784384
Downloading weights
2024-12-14T14:35:57Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmparzl_chr/weights url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar
2024-12-14T14:36:00Z | INFO | [ Complete ] dest=/tmp/tmparzl_chr/weights size="172 MB" total_elapsed=2.339s url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar
Downloaded weights in 2.36s
2024-12-14 14:36:00.325 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f631c9d78c385d83
2024-12-14 14:36:00.398 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded
2024-12-14 14:36:00.398 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2024-12-14 14:36:00.399 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.16it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.01it/s]
2024-12-14 14:36:00.514 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.19s
Using seed: 47170
0it [00:00, ?it/s]
1it [00:00, 8.95it/s]
2it [00:00, 6.24it/s]
3it [00:00, 5.68it/s]
4it [00:00, 5.45it/s]
5it [00:00, 5.35it/s]
6it [00:01, 5.25it/s]
7it [00:01, 5.20it/s]
8it [00:01, 5.18it/s]
9it [00:01, 5.16it/s]
10it [00:01, 5.16it/s]
11it [00:02, 5.14it/s]
12it [00:02, 5.14it/s]
13it [00:02, 5.14it/s]
14it [00:02, 5.14it/s]
15it [00:02, 5.14it/s]
16it [00:03, 5.13it/s]
17it [00:03, 5.12it/s]
18it [00:03, 5.14it/s]
19it [00:03, 5.13it/s]
20it [00:03, 5.13it/s]
21it [00:04, 5.12it/s]
22it [00:04, 5.11it/s]
23it [00:04, 5.12it/s]
24it [00:04, 5.12it/s]
25it [00:04, 5.12it/s]
26it [00:04, 5.10it/s]
27it [00:05, 5.10it/s]
28it [00:05, 5.09it/s]
28it [00:05, 5.20it/s]
Total safe images: 1 out of 1
This model costs approximately $0.0062 to run on Replicate, or 161 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia H100 GPU hardware. Predictions typically complete within 5 seconds.
This model doesn't have a readme.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
Choose a file from your machine
Hint: you can also drag files onto the input
Choose a file from your machine
Hint: you can also drag files onto the input
2024-12-14 14:35:57.841 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2024-12-14 14:35:57.842 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.72it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.54it/s]
2024-12-14 14:35:57.958 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s
free=28809760784384
Downloading weights
2024-12-14T14:35:57Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmparzl_chr/weights url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar
2024-12-14T14:36:00Z | INFO | [ Complete ] dest=/tmp/tmparzl_chr/weights size="172 MB" total_elapsed=2.339s url=https://replicate.delivery/xezq/wWOi9g1VN6JfJiBNP6gf8GSR6zCLmi1GwgD62YMIeMTg3o1nA/trained_model.tar
Downloaded weights in 2.36s
2024-12-14 14:36:00.325 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f631c9d78c385d83
2024-12-14 14:36:00.398 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded
2024-12-14 14:36:00.398 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2024-12-14 14:36:00.399 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2747.16it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2631.01it/s]
2024-12-14 14:36:00.514 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.19s
Using seed: 47170
0it [00:00, ?it/s]
1it [00:00, 8.95it/s]
2it [00:00, 6.24it/s]
3it [00:00, 5.68it/s]
4it [00:00, 5.45it/s]
5it [00:00, 5.35it/s]
6it [00:01, 5.25it/s]
7it [00:01, 5.20it/s]
8it [00:01, 5.18it/s]
9it [00:01, 5.16it/s]
10it [00:01, 5.16it/s]
11it [00:02, 5.14it/s]
12it [00:02, 5.14it/s]
13it [00:02, 5.14it/s]
14it [00:02, 5.14it/s]
15it [00:02, 5.14it/s]
16it [00:03, 5.13it/s]
17it [00:03, 5.12it/s]
18it [00:03, 5.14it/s]
19it [00:03, 5.13it/s]
20it [00:03, 5.13it/s]
21it [00:04, 5.12it/s]
22it [00:04, 5.11it/s]
23it [00:04, 5.12it/s]
24it [00:04, 5.12it/s]
25it [00:04, 5.12it/s]
26it [00:04, 5.10it/s]
27it [00:05, 5.10it/s]
28it [00:05, 5.09it/s]
28it [00:05, 5.20it/s]
Total safe images: 1 out of 1