marcinkleczek / pkn
Public · 182 runs · H100

Prediction
marcinkleczek/pkn:42c0c1a0
ID: jpanj2ewzxrma0ckgzbb0qt0g8
Status: Succeeded
Source: Web
Hardware: H100
Total duration: 7.9 seconds
Created: 2024-12-02 14:47:05 UTC

Input
- model: dev
- prompt: PKN Building in the middle of eastern europe middle 70s in the middle of a day
- go_fast: false
- lora_scale: 1
- megapixels: 1
- num_outputs: 1
- aspect_ratio: 1:1
- output_format: webp
- guidance_scale: 3
- output_quality: 80
- prompt_strength: 0.8
- extra_lora_scale: 1
- num_inference_steps: 28
{ "model": "dev", "prompt": "PKN Building in the middle of eastern europe middle 70s in the middle of a day", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }
Install Replicate’s Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run marcinkleczek/pkn using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "marcinkleczek/pkn:42c0c1a0dba63ebbdafda43401adc737d6de55981373d84c6de98d34159dc843",
  {
    input: {
      model: "dev",
      prompt: "PKN Building in the middle of eastern europe middle 70s in the middle of a day",
      go_fast: false,
      lora_scale: 1,
      megapixels: "1",
      num_outputs: 1,
      aspect_ratio: "1:1",
      output_format: "webp",
      guidance_scale: 3,
      output_quality: 80,
      prompt_strength: 0.8,
      extra_lora_scale: 1,
      num_inference_steps: 28
    }
  }
);

console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client:

import replicate
Run marcinkleczek/pkn using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "marcinkleczek/pkn:42c0c1a0dba63ebbdafda43401adc737d6de55981373d84c6de98d34159dc843",
    input={
        "model": "dev",
        "prompt": "PKN Building in the middle of eastern europe middle 70s in the middle of a day",
        "go_fast": False,
        "lora_scale": 1,
        "megapixels": "1",
        "num_outputs": 1,
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "output_quality": 80,
        "prompt_strength": 0.8,
        "extra_lora_scale": 1,
        "num_inference_steps": 28
    }
)
print(output)
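The run call returns the generated image(s). Depending on the client version, each item in the output is either a plain URL string or a file-like object; the sketch below assumes the older list-of-URLs form and downloads the first image with the standard library (the filename out-0.webp is just an illustrative choice):

import urllib.request

# Assumes `output` is a list of URL strings, as returned by older
# versions of the Python client. Newer versions may instead return
# file-like objects rather than bare URLs.
image_url = output[0]

# Download the generated image to a local file. The name is arbitrary;
# this prediction produced a single .webp image.
urllib.request.urlretrieve(image_url, "out-0.webp")
print(f"Saved {image_url} to out-0.webp")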
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run marcinkleczek/pkn using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "42c0c1a0dba63ebbdafda43401adc737d6de55981373d84c6de98d34159dc843",
    "input": {
      "model": "dev",
      "prompt": "PKN Building in the middle of eastern europe middle 70s in the middle of a day",
      "go_fast": false,
      "lora_scale": 1,
      "megapixels": "1",
      "num_outputs": 1,
      "aspect_ratio": "1:1",
      "output_format": "webp",
      "guidance_scale": 3,
      "output_quality": 80,
      "prompt_strength": 0.8,
      "extra_lora_scale": 1,
      "num_inference_steps": 28
    }
  }' \
  https://api.replicate.com/v1/predictions
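The same request can be made from any HTTP client, not just curl. Below is a minimal sketch using only Python's standard library against the same predictions endpoint, with the blocking Prefer: wait header and a trimmed-down copy of this prediction's input; adjust the inputs as needed:

import json
import os
import urllib.request

# Same endpoint, headers, and version as the curl example above;
# the input here is a trimmed-down copy of this prediction's input.
payload = json.dumps({
    "version": "42c0c1a0dba63ebbdafda43401adc737d6de55981373d84c6de98d34159dc843",
    "input": {
        "prompt": "PKN Building in the middle of eastern europe middle 70s in the middle of a day",
        "aspect_ratio": "1:1",
        "output_format": "webp",
        "guidance_scale": 3,
        "num_inference_steps": 28
    }
}).encode("utf-8")

request = urllib.request.Request(
    "https://api.replicate.com/v1/predictions",
    data=payload,
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        # Hold the connection open until the prediction finishes,
        # instead of returning immediately with a "starting" prediction.
        "Prefer": "wait",
    },
)

with urllib.request.urlopen(request) as response:
    prediction = json.load(response)

# Once the prediction has succeeded, `output` is a list of image URLs.
print(prediction["status"], prediction.get("output"))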
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-12-02T14:47:13.603467Z", "created_at": "2024-12-02T14:47:05.727000Z", "data_removed": false, "error": null, "id": "jpanj2ewzxrma0ckgzbb0qt0g8", "input": { "model": "dev", "prompt": "PKN Building in the middle of eastern europe middle 70s in the middle of a day", "go_fast": false, "lora_scale": 1, "megapixels": "1", "num_outputs": 1, "aspect_ratio": "1:1", "output_format": "webp", "guidance_scale": 3, "output_quality": 80, "prompt_strength": 0.8, "extra_lora_scale": 1, "num_inference_steps": 28 }, "logs": "2024-12-02 14:47:05.820 | DEBUG | fp8.lora_loading:apply_lora_to_model:569 - Extracting keys\n2024-12-02 14:47:05.820 | DEBUG | fp8.lora_loading:apply_lora_to_model:576 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 94%|█████████▍| 286/304 [00:00<00:00, 2859.51it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2760.97it/s]\n2024-12-02 14:47:05.931 | SUCCESS | fp8.lora_loading:unload_loras:559 - LoRAs unloaded in 0.11s\nfree=29055504891904\nDownloading weights\n2024-12-02T14:47:05Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmps6awl4mj/weights url=https://replicate.delivery/xezq/3j5UbIaqa6aSJJs29mfz4FZgeQz6wyFDhNfUwYEZpDqAQvtnA/trained_model.tar\n2024-12-02T14:47:07Z | INFO | [ Complete ] dest=/tmp/tmps6awl4mj/weights size=\"172 MB\" total_elapsed=1.358s url=https://replicate.delivery/xezq/3j5UbIaqa6aSJJs29mfz4FZgeQz6wyFDhNfUwYEZpDqAQvtnA/trained_model.tar\nDownloaded weights in 1.38s\n2024-12-02 14:47:07.314 | INFO | fp8.lora_loading:convert_lora_weights:493 - Loading LoRA weights for /src/weights-cache/04f582cce0d0425b\n2024-12-02 14:47:07.386 | INFO | fp8.lora_loading:convert_lora_weights:514 - LoRA weights loaded\n2024-12-02 14:47:07.386 | DEBUG | fp8.lora_loading:apply_lora_to_model:569 - Extracting keys\n2024-12-02 14:47:07.387 | DEBUG | fp8.lora_loading:apply_lora_to_model:576 - Keys extracted\nApplying LoRA: 0%| | 0/304 [00:00<?, ?it/s]\nApplying LoRA: 92%|█████████▏| 279/304 [00:00<00:00, 2775.81it/s]\nApplying LoRA: 100%|██████████| 304/304 [00:00<00:00, 2645.57it/s]\n2024-12-02 14:47:07.502 | SUCCESS | fp8.lora_loading:load_lora:534 - LoRA applied in 0.19s\nUsing seed: 63448\n0it [00:00, ?it/s]\n1it [00:00, 8.39it/s]\n2it [00:00, 5.87it/s]\n3it [00:00, 5.36it/s]\n4it [00:00, 5.14it/s]\n5it [00:00, 5.02it/s]\n6it [00:01, 4.94it/s]\n7it [00:01, 4.90it/s]\n8it [00:01, 4.88it/s]\n9it [00:01, 4.86it/s]\n10it [00:01, 4.84it/s]\n11it [00:02, 4.84it/s]\n12it [00:02, 4.84it/s]\n13it [00:02, 4.84it/s]\n14it [00:02, 4.84it/s]\n15it [00:03, 4.83it/s]\n16it [00:03, 4.82it/s]\n17it [00:03, 4.82it/s]\n18it [00:03, 4.81it/s]\n19it [00:03, 4.82it/s]\n20it [00:04, 4.82it/s]\n21it [00:04, 4.81it/s]\n22it [00:04, 4.81it/s]\n23it [00:04, 4.80it/s]\n24it [00:04, 4.80it/s]\n25it [00:05, 4.80it/s]\n26it [00:05, 4.81it/s]\n27it [00:05, 4.81it/s]\n28it [00:05, 4.81it/s]\n28it [00:05, 4.89it/s]\nTotal safe images: 1 out of 1", "metrics": { "predict_time": 7.782064436, "total_time": 7.876467 }, "output": [ "https://replicate.delivery/xezq/PGrjv2JRspbmNhPDacSyvV8QmjPGe1tevPxL8e2thezHne2eE/out-0.webp" ], "started_at": "2024-12-02T14:47:05.821403Z", "status": "succeeded", "urls": { "stream": "https://stream.replicate.com/v1/files/bcwr-xkmasdizngz7pcxaczkzsfdxcf4ta3cr47xgdatavm5b4nzi2m4q", "get": "https://api.replicate.com/v1/predictions/jpanj2ewzxrma0ckgzbb0qt0g8", "cancel": "https://api.replicate.com/v1/predictions/jpanj2ewzxrma0ckgzbb0qt0g8/cancel" }, "version": "42c0c1a0dba63ebbdafda43401adc737d6de55981373d84c6de98d34159dc843" }