Readme
This model doesn't have a readme.
Run this model in Node.js with a few lines of code. First, install Replicate's Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run buddhiraz/liveportraitv1 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"buddhiraz/liveportraitv1:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44",
{
input: {
flag_pasteback: true,
input_image_path: "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
input_video_path: "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
flag_do_crop_input: true,
flag_relative_input: true,
live_portrait_dsize: 512,
live_portrait_scale: 2.3,
video_frame_load_cap: 128,
live_portrait_lip_zero: true,
live_portrait_relative: true,
live_portrait_vx_ratio: 0,
live_portrait_vy_ratio: -0.12,
live_portrait_stitching: true,
video_select_every_n_frames: 1,
live_portrait_eye_retargeting: false,
live_portrait_lip_retargeting: false,
live_portrait_lip_retargeting_multiplier: 1,
live_portrait_eyes_retargeting_multiplier: 1
}
}
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"
// To write the file to disk (requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run buddhiraz/liveportraitv1 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"buddhiraz/liveportraitv1:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44",
input={
"flag_pasteback": True,
"input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
"input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
"flag_do_crop_input": True,
"flag_relative_input": True,
"live_portrait_dsize": 512,
"live_portrait_scale": 2.3,
"video_frame_load_cap": 128,
"live_portrait_lip_zero": True,
"live_portrait_relative": True,
"live_portrait_vx_ratio": 0,
"live_portrait_vy_ratio": -0.12,
"live_portrait_stitching": True,
"video_select_every_n_frames": 1,
"live_portrait_eye_retargeting": False,
"live_portrait_lip_retargeting": False,
"live_portrait_lip_retargeting_multiplier": 1,
"live_portrait_eyes_retargeting_multiplier": 1
}
)
print(output)
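Depending on the version of the replicate Python client, output may be a plain URL string or a file-like object. The sketch below is an assumption rather than part of the official example: it saves the resulting MP4 to disk either way, and the animated_portrait.mp4 filename is illustrative.
# Assumes `output` is the result of the replicate.run(...) call above.
import urllib.request

if isinstance(output, str):
    # Older client versions return the delivery URL of the rendered video.
    urllib.request.urlretrieve(output, "animated_portrait.mp4")
else:
    # Newer client versions may return a file-like object exposing read().
    with open("animated_portrait.mp4", "wb") as f:
        f.write(output.read())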
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run buddhiraz/liveportraitv1 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "buddhiraz/liveportraitv1:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44",
"input": {
"flag_pasteback": true,
"input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
"input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
"flag_do_crop_input": true,
"flag_relative_input": true,
"live_portrait_dsize": 512,
"live_portrait_scale": 2.3,
"video_frame_load_cap": 128,
"live_portrait_lip_zero": true,
"live_portrait_relative": true,
"live_portrait_vx_ratio": 0,
"live_portrait_vy_ratio": -0.12,
"live_portrait_stitching": true,
"video_select_every_n_frames": 1,
"live_portrait_eye_retargeting": false,
"live_portrait_lip_retargeting": false,
"live_portrait_lip_retargeting_multiplier": 1,
"live_portrait_eyes_retargeting_multiplier": 1
}
}' \
https://api.replicate.com/v1/predictions
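The example run shown further down took roughly two minutes of predict time, so the Prefer: wait header may return before the prediction has finished. As a hedged alternative, here is a minimal Python sketch that creates the prediction and polls its urls.get endpoint until it settles; it assumes the requests package is installed, and the payload mirrors the curl example above, trimmed to the two media inputs.
import os
import time
import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

# Same request as the curl example above, trimmed to the two media inputs;
# add the other flag_* and live_portrait_* parameters as needed.
body = {
    "version": "buddhiraz/liveportraitv1:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44",
    "input": {
        "input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
        "input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
    },
}

prediction = requests.post(
    "https://api.replicate.com/v1/predictions", headers=headers, json=body
).json()

# Poll until the prediction leaves the in-progress states.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(5)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["status"], prediction.get("output"))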
To learn more, take a look at Replicate’s HTTP API reference docs.
Run this model locally with Cog. Install Cog with Homebrew:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/buddhiraz/liveportraitv1@sha256:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44 \
-i 'flag_pasteback=true' \
-i 'input_image_path="https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png"' \
-i 'input_video_path="https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4"' \
-i 'flag_do_crop_input=true' \
-i 'flag_relative_input=true' \
-i 'live_portrait_dsize=512' \
-i 'live_portrait_scale=2.3' \
-i 'video_frame_load_cap=128' \
-i 'live_portrait_lip_zero=true' \
-i 'live_portrait_relative=true' \
-i 'live_portrait_vx_ratio=0' \
-i 'live_portrait_vy_ratio=-0.12' \
-i 'live_portrait_stitching=true' \
-i 'video_select_every_n_frames=1' \
-i 'live_portrait_eye_retargeting=false' \
-i 'live_portrait_lip_retargeting=false' \
-i 'live_portrait_lip_retargeting_multiplier=1' \
-i 'live_portrait_eyes_retargeting_multiplier=1'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/buddhiraz/liveportraitv1@sha256:8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44
Then send a prediction request to the running container:
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"flag_pasteback": true,
"input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
"input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
"flag_do_crop_input": true,
"flag_relative_input": true,
"live_portrait_dsize": 512,
"live_portrait_scale": 2.3,
"video_frame_load_cap": 128,
"live_portrait_lip_zero": true,
"live_portrait_relative": true,
"live_portrait_vx_ratio": 0,
"live_portrait_vy_ratio": -0.12,
"live_portrait_stitching": true,
"video_select_every_n_frames": 1,
"live_portrait_eye_retargeting": false,
"live_portrait_lip_retargeting": false,
"live_portrait_lip_retargeting_multiplier": 1,
"live_portrait_eyes_retargeting_multiplier": 1
}
}' \
http://localhost:5000/predictions
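You can also call the local server from Python. A minimal sketch, assuming the container started above is listening on localhost:5000 and that requests is installed; local Cog servers may return file outputs as base64 data URIs rather than URLs, so the decoding branch and the animated_portrait.mp4 filename are assumptions.
import base64
import requests

# Same input as the curl example above, trimmed to the two media inputs.
body = {
    "input": {
        "input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
        "input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
    }
}

result = requests.post("http://localhost:5000/predictions", json=body).json()
output = result.get("output")

if isinstance(output, str) and output.startswith("data:"):
    # File outputs from a local Cog server are often base64 data URIs.
    with open("animated_portrait.mp4", "wb") as f:
        f.write(base64.b64decode(output.split(",", 1)[1]))
else:
    print(result.get("status"), output)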
To learn more, take a look at the Cog documentation.
{
"completed_at": "2024-07-12T20:03:30.837862Z",
"created_at": "2024-07-12T19:59:01.656000Z",
"data_removed": false,
"error": null,
"id": "rhc3tdge31rga0cgn23t55h0p4",
"input": {
"flag_pasteback": true,
"input_image_path": "https://replicate.delivery/pbxt/LFzuXbwINYxNv26erUHNYNR85H3hROWAHbnR2nCsfhf4YHzA/Screenshot%20from%202024-07-08%2014-50-41.png",
"input_video_path": "https://replicate.delivery/pbxt/LFzuXFbzMx12AJnrrpRNqXuwDXpTCEoLajXvo1LC3GSCFlHk/d14.mp4",
"flag_do_crop_input": true,
"flag_relative_input": true,
"live_portrait_dsize": 512,
"live_portrait_scale": 2.3,
"video_frame_load_cap": 128,
"live_portrait_lip_zero": true,
"live_portrait_relative": true,
"live_portrait_vx_ratio": 0,
"live_portrait_vy_ratio": -0.12,
"live_portrait_stitching": true,
"video_select_every_n_frames": 1,
"live_portrait_eye_retargeting": false,
"live_portrait_lip_retargeting": false,
"live_portrait_lip_retargeting_multiplier": 1,
"live_portrait_eyes_retargeting_multiplier": 1
},
"logs": "[20:01:27] Load source image from live_portrait_pipeline.py:49\n/tmp/tmpbuvf7ptkScreenshot from\n2024-07-08 14-50-41.png\ntensor([[[[0.7059, 0.9490, 0.9451, ..., 0.0039, 0.0039, 0.0000],\n[0.5098, 0.9059, 0.9529, ..., 0.0078, 0.0039, 0.0000],\n[0.5765, 0.8824, 0.9725, ..., 0.0039, 0.0000, 0.0000],\n...,\n[0.8980, 0.8824, 0.7686, ..., 0.8471, 0.8471, 0.8353],\n[0.8902, 0.8392, 0.6667, ..., 0.8431, 0.8471, 0.8353],\n[0.8588, 0.7412, 0.5020, ..., 0.8157, 0.8392, 0.8392]],\n[[0.7333, 0.9725, 0.9725, ..., 0.0392, 0.0471, 0.0471],\n[0.5373, 0.9333, 0.9804, ..., 0.0431, 0.0471, 0.0471],\n[0.6039, 0.9098, 0.9961, ..., 0.0471, 0.0471, 0.0471],\n...,\n[0.7020, 0.7176, 0.6392, ..., 0.6745, 0.6863, 0.6863],\n[0.6980, 0.6706, 0.5373, ..., 0.6706, 0.6863, 0.6902],\n[0.6902, 0.6078, 0.4118, ..., 0.6431, 0.6745, 0.6902]],\n[[0.7333, 0.9725, 0.9725, ..., 0.0353, 0.0392, 0.0392],\n[0.5373, 0.9333, 0.9765, ..., 0.0392, 0.0392, 0.0392],\n[0.6039, 0.9098, 0.9961, ..., 0.0353, 0.0353, 0.0353],\n...,\n[0.5765, 0.5922, 0.5176, ..., 0.4706, 0.4824, 0.4745],\n[0.5725, 0.5490, 0.4235, ..., 0.4706, 0.4824, 0.4784],\n[0.5843, 0.5176, 0.3294, ..., 0.4431, 0.4706, 0.4824]]]],\ndevice='cuda:0')\n[20:01:29] Load video: /tmp/tmpqda7wxbqd14.mp4 live_portrait_pipeline.py:99\nThe FPS of /tmp/tmpqda7wxbqd14.mp4 is: live_portrait_pipeline.py:104\n30\nLoad video file (mp4 mov avi etc...): live_portrait_pipeline.py:106\n/tmp/tmpqda7wxbqd14.mp4\n[20:01:31] Start making motion template... live_portrait_pipeline.py:110\nMaking motion templates... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:09\n[20:01:48] Dump motion template to live_portrait_pipeline.py:127\n/tmp/tmpqda7wxbqd14.pkl\nPrepared pasteback mask done. live_portrait_pipeline.py:139\n🚀Animating... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:01:17\nConcatenating result... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:03\n[20:03:20] Video with audio generated successfully: video.py:197\n/tmp/tmpbuvf7ptkScreenshot from 2024-07-08\n14-50-41--tmpqda7wxbqd14_concat_with_audio.mp4\nReplace /tmp/tmpbuvf7ptkScreenshot from live_portrait_pipeline.py:230\n2024-07-08\n14-50-41--tmpqda7wxbqd14_concat.mp4\nwith /tmp/tmpbuvf7ptkScreenshot from\n2024-07-08\n14-50-41--tmpqda7wxbqd14_concat_with_au\ndio.mp4\n[swscaler @ 0x568f240] Warning: data is not aligned! This can lead to a speed loss\n[20:03:30] Video with audio generated successfully: video.py:197\n/tmp/tmpbuvf7ptkScreenshot from 2024-07-08\n14-50-41--tmpqda7wxbqd14_with_audio.mp4\nReplace /tmp/tmpbuvf7ptkScreenshot from live_portrait_pipeline.py:244\n2024-07-08 14-50-41--tmpqda7wxbqd14.mp4\nwith /tmp/tmpbuvf7ptkScreenshot from\n2024-07-08\n14-50-41--tmpqda7wxbqd14_with_audio.mp4\nAnimated template: live_portrait_pipeline.py:248\n/tmp/tmpqda7wxbqd14.pkl, you can\nspecify `-d` argument with this\ntemplate path next time to avoid\ncropping video, motion making and\nprotecting privacy.\nAnimated video: live_portrait_pipeline.py:249\n/tmp/tmpbuvf7ptkScreenshot from\n2024-07-08 14-50-41--tmpqda7wxbqd14.mp4\nAnimated video with concact: live_portrait_pipeline.py:250\n/tmp/tmpbuvf7ptkScreenshot from\n2024-07-08\n14-50-41--tmpqda7wxbqd14_concat.mp4",
"metrics": {
"predict_time": 123.935499453,
"total_time": 269.181862
},
"output": "https://replicate.delivery/czjl/nei1zWuTBvTNJapC71F83epbK6GtHkbyHN9H8xhMlGrS4zHTA/tmpbuvf7ptkScreenshot%20from%202024-07-08%2014-50-41--tmpqda7wxbqd14.mp4",
"started_at": "2024-07-12T20:01:26.902362Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/rhc3tdge31rga0cgn23t55h0p4",
"cancel": "https://api.replicate.com/v1/predictions/rhc3tdge31rga0cgn23t55h0p4/cancel"
},
"version": "8514ed7670d55e3e4811ee4003bb1741c15b583380a40e711833eadb988d2f44"
}
[20:01:27] Load source image from live_portrait_pipeline.py:49
/tmp/tmpbuvf7ptkScreenshot from
2024-07-08 14-50-41.png
tensor([[[[0.7059, 0.9490, 0.9451, ..., 0.0039, 0.0039, 0.0000],
[0.5098, 0.9059, 0.9529, ..., 0.0078, 0.0039, 0.0000],
[0.5765, 0.8824, 0.9725, ..., 0.0039, 0.0000, 0.0000],
...,
[0.8980, 0.8824, 0.7686, ..., 0.8471, 0.8471, 0.8353],
[0.8902, 0.8392, 0.6667, ..., 0.8431, 0.8471, 0.8353],
[0.8588, 0.7412, 0.5020, ..., 0.8157, 0.8392, 0.8392]],
[[0.7333, 0.9725, 0.9725, ..., 0.0392, 0.0471, 0.0471],
[0.5373, 0.9333, 0.9804, ..., 0.0431, 0.0471, 0.0471],
[0.6039, 0.9098, 0.9961, ..., 0.0471, 0.0471, 0.0471],
...,
[0.7020, 0.7176, 0.6392, ..., 0.6745, 0.6863, 0.6863],
[0.6980, 0.6706, 0.5373, ..., 0.6706, 0.6863, 0.6902],
[0.6902, 0.6078, 0.4118, ..., 0.6431, 0.6745, 0.6902]],
[[0.7333, 0.9725, 0.9725, ..., 0.0353, 0.0392, 0.0392],
[0.5373, 0.9333, 0.9765, ..., 0.0392, 0.0392, 0.0392],
[0.6039, 0.9098, 0.9961, ..., 0.0353, 0.0353, 0.0353],
...,
[0.5765, 0.5922, 0.5176, ..., 0.4706, 0.4824, 0.4745],
[0.5725, 0.5490, 0.4235, ..., 0.4706, 0.4824, 0.4784],
[0.5843, 0.5176, 0.3294, ..., 0.4431, 0.4706, 0.4824]]]],
device='cuda:0')
[20:01:29] Load video: /tmp/tmpqda7wxbqd14.mp4 live_portrait_pipeline.py:99
The FPS of /tmp/tmpqda7wxbqd14.mp4 is: live_portrait_pipeline.py:104
30
Load video file (mp4 mov avi etc...): live_portrait_pipeline.py:106
/tmp/tmpqda7wxbqd14.mp4
[20:01:31] Start making motion template... live_portrait_pipeline.py:110
Making motion templates... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:09
[20:01:48] Dump motion template to live_portrait_pipeline.py:127
/tmp/tmpqda7wxbqd14.pkl
Prepared pasteback mask done. live_portrait_pipeline.py:139
🚀Animating... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:01:17
Concatenating result... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:03
[20:03:20] Video with audio generated successfully: video.py:197
/tmp/tmpbuvf7ptkScreenshot from 2024-07-08
14-50-41--tmpqda7wxbqd14_concat_with_audio.mp4
Replace /tmp/tmpbuvf7ptkScreenshot from live_portrait_pipeline.py:230
2024-07-08
14-50-41--tmpqda7wxbqd14_concat.mp4
with /tmp/tmpbuvf7ptkScreenshot from
2024-07-08
14-50-41--tmpqda7wxbqd14_concat_with_au
dio.mp4
[swscaler @ 0x568f240] Warning: data is not aligned! This can lead to a speed loss
[20:03:30] Video with audio generated successfully: video.py:197
/tmp/tmpbuvf7ptkScreenshot from 2024-07-08
14-50-41--tmpqda7wxbqd14_with_audio.mp4
Replace /tmp/tmpbuvf7ptkScreenshot from live_portrait_pipeline.py:244
2024-07-08 14-50-41--tmpqda7wxbqd14.mp4
with /tmp/tmpbuvf7ptkScreenshot from
2024-07-08
14-50-41--tmpqda7wxbqd14_with_audio.mp4
Animated template: live_portrait_pipeline.py:248
/tmp/tmpqda7wxbqd14.pkl, you can
specify `-d` argument with this
template path next time to avoid
cropping video, motion making and
protecting privacy.
Animated video: live_portrait_pipeline.py:249
/tmp/tmpbuvf7ptkScreenshot from
2024-07-08 14-50-41--tmpqda7wxbqd14.mp4
Animated video with concact: live_portrait_pipeline.py:250
/tmp/tmpbuvf7ptkScreenshot from
2024-07-08
14-50-41--tmpqda7wxbqd14_concat.mp4
This model runs on Nvidia T4 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.