Readme
This model doesn't have a readme.
(Updated 4 months, 3 weeks ago)
Run this model in Node.js with one line of code:
npm install replicate
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs";
// Create the API client. Authentication comes from the
// REPLICATE_API_TOKEN environment variable set in the step above.
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run kondagen/flux-konda-kpradeep using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Create a prediction and wait for it to complete.
// `output` is an array of FileOutput objects, one per generated image.
const output = await replicate.run(
  "kondagen/flux-konda-kpradeep:1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac",
  {
    input: {
      model: "dev",
      prompt: "A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time.",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28,
    },
  },
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk.
// NOTE: the callback-style fs.writeFile(path, data) with no callback
// throws in modern Node, so use the promise API and await it.
// The model emits webp (output_format above), so name the file to match.
await fs.promises.writeFile("my-image.webp", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run kondagen/flux-konda-kpradeep using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Assemble the prediction input separately so individual settings
# are easy to locate and tweak.
prediction_input = {
    "model": "dev",
    "prompt": "A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time.",
    "go_fast": False,
    "lora_scale": 1,
    "megapixels": "1",
    "num_outputs": 1,
    "aspect_ratio": "1:1",
    "output_format": "webp",
    "guidance_scale": 3,
    "output_quality": 80,
    "prompt_strength": 0.8,
    "extra_lora_scale": 1,
    "num_inference_steps": 28,
}

# Run the model (blocks until the prediction finishes) and show the result.
output = replicate.run(
    "kondagen/flux-konda-kpradeep:1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac",
    input=prediction_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run kondagen/flux-konda-kpradeep using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via the Replicate HTTP API.
# The "Prefer: wait" header asks the API to hold the connection open
# until the prediction completes (instead of returning immediately
# with a polling URL). $'…' is bash ANSI-C quoting for the JSON body.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "kondagen/flux-konda-kpradeep:1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac",
"input": {
"model": "dev",
"prompt": "A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time.",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
# Download the model image and run a single prediction locally with Cog.
# Each -i flag supplies one input; string values are quoted inside the
# single-quoted shell argument (e.g. 'model="dev"').
cog predict r8.im/kondagen/flux-konda-kpradeep@sha256:1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac \
-i 'model="dev"' \
-i 'prompt="A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time."' \
-i 'go_fast=false' \
-i 'lora_scale=1' \
-i 'megapixels="1"' \
-i 'num_outputs=1' \
-i 'aspect_ratio="1:1"' \
-i 'output_format="webp"' \
-i 'guidance_scale=3' \
-i 'output_quality=80' \
-i 'prompt_strength=0.8' \
-i 'extra_lora_scale=1' \
-i 'num_inference_steps=28'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
# Start the model server locally (detached, requires NVIDIA GPUs + Docker).
docker run -d -p 5000:5000 --gpus=all r8.im/kondagen/flux-konda-kpradeep@sha256:1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac

# Send a prediction request to the local server.
# The original single-line paste kept literal "\" continuation characters;
# inside one line each "\ " escapes the following space and breaks curl's
# argument parsing, so the command is restored to proper multi-line form.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{ "input": { "model": "dev", "prompt": "A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time.", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 } }' \
  http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2025-01-21T08:15:58.998327Z",
"created_at": "2025-01-21T08:15:51.218000Z",
"data_removed": false,
"error": null,
"id": "pg6s7vx3p9rme0cmgzqbrt23pm",
"input": {
"model": "dev",
"prompt": "A photo of KOPRA the police officer age around 50 years sitting in bar club and having beer. Background people sitting and drinking and chatting. Its inside the bar at the night time.",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
},
"logs": "2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2768.71it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2657.41it/s]\n2025-01-21 08:15:51.349 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s\nfree=29911289065472\nDownloading weights\n2025-01-21T08:15:51Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp9nku7zok/weights url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar\n2025-01-21T08:15:52Z | INFO | [ Complete ] dest=/tmp/tmp9nku7zok/weights size=\"172 MB\" total_elapsed=1.402s url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar\nDownloaded weights in 1.42s\n2025-01-21 08:15:52.774 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f5fbd8ea2b6abf3a\n2025-01-21 08:15:52.843 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2772.24it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2660.07it/s]\n2025-01-21 08:15:52.958 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s\nUsing seed: 50155\n0it [00:00, ?it/s]\n1it [00:00, 8.42it/s]\n2it [00:00, 5.90it/s]\n3it [00:00, 5.39it/s]\n4it [00:00, 5.17it/s]\n5it [00:00, 5.06it/s]\n6it [00:01, 4.98it/s]\n7it [00:01, 4.94it/s]\n8it [00:01, 4.91it/s]\n9it [00:01, 4.89it/s]\n10it [00:01, 4.87it/s]\n11it [00:02, 4.87it/s]\n12it [00:02, 4.86it/s]\n13it [00:02, 4.86it/s]\n14it 
[00:02, 4.86it/s]\n15it [00:03, 4.86it/s]\n16it [00:03, 4.85it/s]\n17it [00:03, 4.85it/s]\n18it [00:03, 4.85it/s]\n19it [00:03, 4.85it/s]\n20it [00:04, 4.85it/s]\n21it [00:04, 4.86it/s]\n22it [00:04, 4.85it/s]\n23it [00:04, 4.85it/s]\n24it [00:04, 4.86it/s]\n25it [00:05, 4.86it/s]\n26it [00:05, 4.86it/s]\n27it [00:05, 4.86it/s]\n28it [00:05, 4.86it/s]\n28it [00:05, 4.93it/s]\nTotal safe images: 1 out of 1",
"metrics": {
"predict_time": 7.763457314,
"total_time": 7.780327
},
"output": [
"https://replicate.delivery/xezq/kCiA3RtMeTQZBS7eBaVs62S9LOSFrFVeqmn8m3lORfW6bCdQB/out-0.webp"
],
"started_at": "2025-01-21T08:15:51.234869Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-dj5zybi6iy75aahdl7pjzobywllaxm5b3vlwrdxcdbjrhuwvsz6a",
"get": "https://api.replicate.com/v1/predictions/pg6s7vx3p9rme0cmgzqbrt23pm",
"cancel": "https://api.replicate.com/v1/predictions/pg6s7vx3p9rme0cmgzqbrt23pm/cancel"
},
"version": "1ca102c050e9c5460749f942b8e7631d0b37d88299c0ed6c338ee217669cfdac"
}
2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2768.71it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2657.41it/s]
2025-01-21 08:15:51.349 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s
free=29911289065472
Downloading weights
2025-01-21T08:15:51Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp9nku7zok/weights url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar
2025-01-21T08:15:52Z | INFO | [ Complete ] dest=/tmp/tmp9nku7zok/weights size="172 MB" total_elapsed=1.402s url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar
Downloaded weights in 1.42s
2025-01-21 08:15:52.774 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f5fbd8ea2b6abf3a
2025-01-21 08:15:52.843 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded
2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2772.24it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2660.07it/s]
2025-01-21 08:15:52.958 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s
Using seed: 50155
0it [00:00, ?it/s]
1it [00:00, 8.42it/s]
2it [00:00, 5.90it/s]
3it [00:00, 5.39it/s]
4it [00:00, 5.17it/s]
5it [00:00, 5.06it/s]
6it [00:01, 4.98it/s]
7it [00:01, 4.94it/s]
8it [00:01, 4.91it/s]
9it [00:01, 4.89it/s]
10it [00:01, 4.87it/s]
11it [00:02, 4.87it/s]
12it [00:02, 4.86it/s]
13it [00:02, 4.86it/s]
14it [00:02, 4.86it/s]
15it [00:03, 4.86it/s]
16it [00:03, 4.85it/s]
17it [00:03, 4.85it/s]
18it [00:03, 4.85it/s]
19it [00:03, 4.85it/s]
20it [00:04, 4.85it/s]
21it [00:04, 4.86it/s]
22it [00:04, 4.85it/s]
23it [00:04, 4.85it/s]
24it [00:04, 4.86it/s]
25it [00:05, 4.86it/s]
26it [00:05, 4.86it/s]
27it [00:05, 4.86it/s]
28it [00:05, 4.86it/s]
28it [00:05, 4.93it/s]
Total safe images: 1 out of 1
This model runs on Nvidia H100 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model doesn't have a readme.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
Choose a file from your machine
Hint: you can also drag files onto the input
Choose a file from your machine
Hint: you can also drag files onto the input
2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-01-21 08:15:51.234 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2768.71it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2657.41it/s]
2025-01-21 08:15:51.349 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.12s
free=29911289065472
Downloading weights
2025-01-21T08:15:51Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp9nku7zok/weights url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar
2025-01-21T08:15:52Z | INFO | [ Complete ] dest=/tmp/tmp9nku7zok/weights size="172 MB" total_elapsed=1.402s url=https://replicate.delivery/xezq/vgGn1CWy3B68PVa6Ugw67aHtJhOChQ3eqJK2PJMp4B5z2nDKA/trained_model.tar
Downloaded weights in 1.42s
2025-01-21 08:15:52.774 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/f5fbd8ea2b6abf3a
2025-01-21 08:15:52.843 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded
2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys
2025-01-21 08:15:52.844 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted
Applying LoRA: 0%| | 0/304 [00:00<?, ?it/s]
Applying LoRA: 91%|█████████▏| 278/304 [00:00<00:00, 2772.24it/s]
Applying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2660.07it/s]
2025-01-21 08:15:52.958 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s
Using seed: 50155
0it [00:00, ?it/s]
1it [00:00, 8.42it/s]
2it [00:00, 5.90it/s]
3it [00:00, 5.39it/s]
4it [00:00, 5.17it/s]
5it [00:00, 5.06it/s]
6it [00:01, 4.98it/s]
7it [00:01, 4.94it/s]
8it [00:01, 4.91it/s]
9it [00:01, 4.89it/s]
10it [00:01, 4.87it/s]
11it [00:02, 4.87it/s]
12it [00:02, 4.86it/s]
13it [00:02, 4.86it/s]
14it [00:02, 4.86it/s]
15it [00:03, 4.86it/s]
16it [00:03, 4.85it/s]
17it [00:03, 4.85it/s]
18it [00:03, 4.85it/s]
19it [00:03, 4.85it/s]
20it [00:04, 4.85it/s]
21it [00:04, 4.86it/s]
22it [00:04, 4.85it/s]
23it [00:04, 4.85it/s]
24it [00:04, 4.86it/s]
25it [00:05, 4.86it/s]
26it [00:05, 4.86it/s]
27it [00:05, 4.86it/s]
28it [00:05, 4.86it/s]
28it [00:05, 4.93it/s]
Total safe images: 1 out of 1