Readme
This model doesn't have a readme.
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import { writeFile } from "node:fs/promises";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run datiggadev/flux_datigga-selfie using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "datiggadev/flux_datigga-selfie:2776b7e6148abbd2702142ee30d376cf74166d78a7b2d098f301ea4ecea457ea",
  {
    input: {
      model: "dev",
      prompt: "photo of tgga as classic circus clown, highly detailed",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28
    }
  }
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (uses the writeFile import from node:fs/promises above):
await writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run datiggadev/flux_datigga-selfie using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "datiggadev/flux_datigga-selfie:2776b7e6148abbd2702142ee30d376cf74166d78a7b2d098f301ea4ecea457ea",
    input={
        "model": "dev",
        "prompt": "photo of tgga as classic circus clown, highly detailed",
        "go_fast": False,
        "lora_scale": 1,
        "megapixels": "1",
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "output_quality": 80,
        "prompt_strength": 0.8,
        "extra_lora_scale": 1,
        "num_inference_steps": 28
    }
)
print(output)
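The output is a list with one item per generated image. Below is a minimal sketch for saving those images to disk, assuming each item is either a file-like output object with a read() method (newer versions of the replicate client) or a plain URL string (older versions); the output_N.webp filenames are arbitrary:

import urllib.request

for index, item in enumerate(output):
    path = f"output_{index}.webp"  # matches the "webp" output_format requested above
    with open(path, "wb") as f:
        if hasattr(item, "read"):
            # File-like output object: read the bytes directly
            f.write(item.read())
        else:
            # Plain URL string: download it
            f.write(urllib.request.urlopen(item).read())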
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run datiggadev/flux_datigga-selfie using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "datiggadev/flux_datigga-selfie:2776b7e6148abbd2702142ee30d376cf74166d78a7b2d098f301ea4ecea457ea",
    "input": {
      "model": "dev",
      "prompt": "photo of tgga as classic circus clown, highly detailed",
      "go_fast": false,
      "lora_scale": 1,
      "megapixels": "1",
      "num_outputs": 1,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
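The Prefer: wait header asks the API to hold the request open until the prediction finishes, but long-running predictions can still come back while they are in progress. Here is a minimal polling sketch in Python (the third-party requests library is an assumption; any HTTP client works), based on the status and urls.get fields shown in the example prediction response below; prediction is assumed to hold the parsed JSON body returned by the POST above:

import os
import time
import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

# Re-fetch the prediction until it reaches a terminal state
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

if prediction["status"] == "succeeded":
    print(prediction["output"])  # list of output file URLs
else:
    print(prediction["error"])

An example prediction response from this model, including its logs and timing metrics: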
{
"completed_at": "2024-12-29T15:53:05.053545Z",
"created_at": "2024-12-29T15:52:55.027000Z",
"data_removed": false,
"error": null,
"id": "pfdct4pkydrma0cm2cfb3e510r",
"input": {
"model": "dev",
"prompt": "photo of tgga as classic circus clown, highly detailed",
"go_fast": false,
"lora_scale": 1,
"megapixels": "1",
"num_outputs": 1,
"aspect_ratio": "1:1",
"output_format": "webp",
"guidance_scale": 3,
"output_quality": 80,
"prompt_strength": 0.8,
"extra_lora_scale": 1,
"num_inference_steps": 28
},
"logs": "2024-12-29 15:52:55.079 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 15:52:55.080 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 92%|█████████▏| 281/304 [00:00<00:00, 2796.67it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2677.84it/s]\n2024-12-29 15:52:55.193 | SUCCESS | fp8.lora_loading:unload_loras:564 - LoRAs unloaded in 0.11s\nfree=30154739773440\nDownloading weights\n2024-12-29T15:52:55Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmppxdfzuv4/weights url=https://replicate.delivery/xezq/LdmjBlrjbEa0B93HcxwFSP0hCX7eFUEu3cylYJ4wiJ3yB5fTA/trained_model.tar\n2024-12-29T15:52:58Z | INFO | [ Complete ] dest=/tmp/tmppxdfzuv4/weights size=\"172 MB\" total_elapsed=3.517s url=https://replicate.delivery/xezq/LdmjBlrjbEa0B93HcxwFSP0hCX7eFUEu3cylYJ4wiJ3yB5fTA/trained_model.tar\nDownloaded weights in 3.54s\n2024-12-29 15:52:58.736 | INFO | fp8.lora_loading:convert_lora_weights:498 - Loading LoRA weights for /src/weights-cache/7700762c7c790fe9\n2024-12-29 15:52:58.806 | INFO | fp8.lora_loading:convert_lora_weights:519 - LoRA weights loaded\n2024-12-29 15:52:58.806 | DEBUG | fp8.lora_loading:apply_lora_to_model:574 - Extracting keys\n2024-12-29 15:52:58.806 | DEBUG | fp8.lora_loading:apply_lora_to_model:581 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 92%|█████████▏| 281/304 [00:00<00:00, 2801.19it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2681.85it/s]\n2024-12-29 15:52:58.920 | SUCCESS | fp8.lora_loading:load_lora:539 - LoRA applied in 0.18s\nUsing seed: 17591\n0it [00:00, ?it/s]\n1it [00:00, 8.36it/s]\n2it [00:00, 5.83it/s]\n3it [00:00, 5.32it/s]\n4it [00:00, 5.11it/s]\n5it [00:00, 4.97it/s]\n6it [00:01, 4.90it/s]\n7it [00:01, 4.87it/s]\n8it [00:01, 4.85it/s]\n9it [00:01, 4.85it/s]\n10it [00:01, 4.83it/s]\n11it [00:02, 4.81it/s]\n12it [00:02, 4.81it/s]\n13it [00:02, 4.81it/s]\n14it [00:02, 4.80it/s]\n15it [00:03, 4.80it/s]\n16it [00:03, 4.80it/s]\n17it [00:03, 4.80it/s]\n18it [00:03, 4.81it/s]\n19it [00:03, 4.81it/s]\n20it [00:04, 4.80it/s]\n21it [00:04, 4.80it/s]\n22it [00:04, 4.80it/s]\n23it [00:04, 4.80it/s]\n24it [00:04, 4.81it/s]\n25it [00:05, 4.80it/s]\n26it [00:05, 4.79it/s]\n27it [00:05, 4.79it/s]\n28it [00:05, 4.80it/s]\n28it [00:05, 4.87it/s]\nTotal safe images: 1 out of 1",
"metrics": {
"predict_time": 9.973403873,
"total_time": 10.026545
},
"output": [
"https://replicate.delivery/xezq/xFKE3MKMc54HCN4a6xW3FfYQvvxN1G2nJ9Iwbc6jweeCTkfPB/out-0.webp"
],
"started_at": "2024-12-29T15:52:55.080141Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-e2m3acyvnh5u2j66vc6beqovdcdrmldb44yahlyja65crs23krqa",
"get": "https://api.replicate.com/v1/predictions/pfdct4pkydrma0cm2cfb3e510r",
"cancel": "https://api.replicate.com/v1/predictions/pfdct4pkydrma0cm2cfb3e510r/cancel"
},
"version": "2776b7e6148abbd2702142ee30d376cf74166d78a7b2d098f301ea4ecea457ea"
}
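A short sketch showing how you might pull the timing metrics out of a response like the one above and download the generated image; field names are taken from that example, prediction is again assumed to be the parsed response dict, and the local filename is arbitrary:

import urllib.request
from datetime import datetime

def parse_ts(value):
    # Timestamps are RFC 3339 strings with a trailing "Z"
    return datetime.fromisoformat(value.replace("Z", "+00:00"))

queue_seconds = (parse_ts(prediction["started_at"]) - parse_ts(prediction["created_at"])).total_seconds()
print(f"queued {queue_seconds:.2f}s, predict_time {prediction['metrics']['predict_time']:.2f}s")

# Download the first (and here only) output image
with open("out-0.webp", "wb") as f:
    f.write(urllib.request.urlopen(prediction["output"][0]).read())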
This model runs on Nvidia H100 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model is warm. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.