camenduru/streaming-t2v
StreamingT2V: Consistent, Dynamic, and Extendable Long Video Generation from Text
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad
ID: 7vqctwv4hnrge0cerpktxeya0w
Status: Succeeded
Source: Web
Hardware: A100 (40GB)
Input
- seed: 33
- chunk: 24
- prompt: Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the outputs are MP4 videos; requires an fs import such as `import fs from "node:fs/promises"`):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
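The model returns a list of file URLs, one for the raw render (output.mp4) and one for the enhanced version (output_enhanced.mp4), as shown in the Output section below. As a minimal sketch, assuming output is the plain list of URL strings shown there (newer client versions may return file objects instead), you could save both videos locally with the standard library; the local filenames here are arbitrary:

import urllib.request

# Assumes `output` looks like the list in the Output JSON, e.g.
# ["https://replicate.delivery/.../output.mp4", "https://replicate.delivery/.../output_enhanced.mp4"]
for i, url in enumerate(output):
    filename = f"streaming_t2v_{i}.mp4"  # arbitrary local name
    urllib.request.urlretrieve(url, filename)
    print(f"saved {url} -> {filename}")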
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{ "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad", "input": { "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" } }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
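The Prefer: wait header above asks the API to hold the connection open until the prediction finishes. For a run this long you may instead prefer to create the prediction and poll it: the response's urls.get endpoint (visible in the Output JSON below) reports the current status. A rough sketch of that flow in Python, assuming the same version hash, an API token in REPLICATE_API_TOKEN, and the usual terminal statuses (only "succeeded" is confirmed by the output below):

import json
import os
import time
import urllib.request

API_URL = "https://api.replicate.com/v1/predictions"
HEADERS = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

body = json.dumps({
    "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    "input": {
        "seed": 33, "chunk": 24, "enhance": True, "overlap": 8,
        "num_steps": 50, "num_frames": 56, "image_guidance": 9,
        "negative_prompt": "",
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
    },
}).encode()

# Create the prediction; without "Prefer: wait" the API responds immediately.
req = urllib.request.Request(API_URL, data=body, headers=HEADERS)
prediction = json.load(urllib.request.urlopen(req))

# Poll the prediction's own "get" URL until it reaches a terminal state.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(10)
    poll = urllib.request.Request(prediction["urls"]["get"], headers=HEADERS)
    prediction = json.load(urllib.request.urlopen(poll))

print(prediction["status"], prediction.get("output"))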
Output
{ "completed_at": "2024-04-10T01:52:59.159613Z", "created_at": "2024-04-10T01:37:45.613000Z", "data_removed": false, "error": null, "id": "7vqctwv4hnrge0cerpktxeya0w", "input": { "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 
'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:24 • 0:00:00 0.00it/s \n2024-04-10 01:50:29,984 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 01:52:58,707 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 320.638649, "total_time": 913.546613 }, "output": [ "https://replicate.delivery/pbxt/WddFc1cTKramHdBNuf0syY6f6ddLmXEreRd8woKzRe9ovojKB/output.mp4", "https://replicate.delivery/pbxt/bIAEbq6tviolC9ODGGK9sefOj6D3ZXb2eIPVTkQe1hDpvojKB/output_enhanced.mp4" ], "started_at": "2024-04-10T01:47:38.520964Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/7vqctwv4hnrge0cerpktxeya0w", "cancel": "https://api.replicate.com/v1/predictions/7vqctwv4hnrge0cerpktxeya0w/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in 320.6 seconds
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad
ID: 0dczra67jxrge0cerpv9nfvkrm
Status: Succeeded
Source: Web
Hardware: A100 (40GB)
Input
- seed: 33
- chunk: 24
- prompt: Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the outputs are MP4 videos; requires an fs import such as `import fs from "node:fs/promises"`):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{ "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad", "input": { "seed": 33, "chunk": 24, "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" } }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T02:03:10.356844Z", "created_at": "2024-04-10T01:54:34.007000Z", "data_removed": false, "error": null, "id": "0dczra67jxrge0cerpv9nfvkrm", "input": { "seed": 33, "chunk": 24, "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 
'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:25 • 0:00:00 0.00it/s\n2024-04-10 02:00:41,838 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 02:03:09,910 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 317.994453, "total_time": 516.349844 }, "output": [ "https://replicate.delivery/pbxt/Z2WXg4M8YZJfOSgfZWfgnDRnBh2U1EQxPNyzAnxms4h6q0RlA/output.mp4", "https://replicate.delivery/pbxt/WwgevDPt8oWmR6VRPfHfOlgTfvvkt6R2gqDSk7futXcwrSHVC/output_enhanced.mp4" ], "started_at": "2024-04-10T01:57:52.362391Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/0dczra67jxrge0cerpv9nfvkrm", "cancel": "https://api.replicate.com/v1/predictions/0dczra67jxrge0cerpv9nfvkrm/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in 318.0 seconds
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad
ID: 0d05rkf575rge0cerpz9mefe04
Status: Succeeded
Source: Web
Hardware: A100 (40GB)
Input
- seed: 33
- chunk: 24
- prompt: People dancing in room filled with fog and colorful lights.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "People dancing in room filled with fog and colorful lights.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "People dancing in room filled with fog and colorful lights.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the outputs are MP4 videos; requires an fs import such as `import fs from "node:fs/promises"`):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "People dancing in room filled with fog and colorful lights.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{ "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad", "input": { "seed": 33, "chunk": 24, "prompt": "People dancing in room filled with fog and colorful lights.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" } }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T02:08:42.670720Z", "created_at": "2024-04-10T02:03:25.881000Z", "data_removed": false, "error": null, "id": "0d05rkf575rge0cerpz9mefe04", "input": { "seed": 33, "chunk": 24, "prompt": "People dancing in room filled with fog and colorful lights.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:24 • 0:00:00 0.00it/s\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and 
t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")", "metrics": { "predict_time": 316.774495, "total_time": 316.78972 }, "output": [ "https://replicate.delivery/pbxt/S0NaFjYwqUZhChi2qp6gibrjAtNMqeLA0oGTyCejdXxqa6oSA/output.mp4", "https://replicate.delivery/pbxt/Q52lLhCFIqZMLpdq1E0A0mcOO0oeAy7jySs9NKbXZhRVNdUJA/output_enhanced.mp4" ], "started_at": "2024-04-10T02:03:25.896225Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/0d05rkf575rge0cerpz9mefe04", "cancel": "https://api.replicate.com/v1/predictions/0d05rkf575rge0cerpz9mefe04/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in 316.8 seconds
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad
ID: kd5476jj11rg80cerpzr1217gm
Status: Succeeded
Source: Web
Hardware: A100 (40GB)
Input
- seed: 33
- chunk: 24
- prompt: sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the outputs are MP4 videos; requires an fs import such as `import fs from "node:fs/promises"`):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{ "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad", "input": { "seed": 33, "chunk": 24, "prompt": "sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" } }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T02:10:12.985631Z", "created_at": "2024-04-10T02:03:53.736000Z", "data_removed": false, "error": null, "id": "kd5476jj11rg80cerpzr1217gm", "input": { "seed": 33, "chunk": 24, "prompt": "sunset, orange sky, warm lighting, fishing boats, ocean waves seagulls, rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, coastal landscape, seaside scenery.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 
'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:24 • 0:00:00 0.00it/s\n2024-04-10 02:07:45,470 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 02:10:12,535 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 319.054031, "total_time": 379.249631 }, "output": [ "https://replicate.delivery/pbxt/DM1lUEChGT4fZSbJXB1cOKUsWdDnVlSOoOXbNeJQPmmEc6oSA/output.mp4", "https://replicate.delivery/pbxt/lMbCTgZhwHYOOZWns8EsNhL7imZ88FMeVk0AsYFaeWSEc6oSA/output_enhanced.mp4" ], "started_at": "2024-04-10T02:04:53.931600Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/kd5476jj11rg80cerpzr1217gm", "cancel": "https://api.replicate.com/v1/predictions/kd5476jj11rg80cerpzr1217gm/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in 319.1 seconds
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad
ID: hhk03y18z1rgc0cerqjasn8mww
Status: Succeeded
Source: Web
Hardware: A100 (40GB)
Input
- seed: 33
- chunk: 12
- prompt: Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.
- enhance: true
- overlap: 4
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 12, "prompt": "Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.", "enhance": true, "overlap": 4, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 33,
      chunk: 12,
      prompt: "Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.",
      enhance: true,
      overlap: 4,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk (the outputs are MP4 videos; requires an fs import such as `import fs from "node:fs/promises"`):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 33,
        "chunk": 12,
        "prompt": "Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.",
        "enhance": True,
        "overlap": 4,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{ "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad", "input": { "seed": 33, "chunk": 12, "prompt": "Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.", "enhance": true, "overlap": 4, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" } }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T02:52:46.717711Z", "created_at": "2024-04-10T02:44:08.056000Z", "data_removed": false, "error": null, "id": "hhk03y18z1rgc0cerqjasn8mww", "input": { "seed": 33, "chunk": 12, "prompt": "Discover the secret language of bees: delve into the complex communication system that allows bees to coordinate their actions and navigate the world.", "enhance": true, "overlap": 4, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 
'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:25 • 0:00:00 0.00it/s \nVideo cannot be processed with chunk size {chunk_size} and overlap size {overlap_size}, trimming it to length {trim_length} to be able to process it\n2024-04-10 02:50:18,435 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 02:52:46,279 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 315.864303, "total_time": 518.661711 }, "output": [ "https://replicate.delivery/pbxt/uwxrIeW0uNW4YymMfbhE7wuvlbVXKbIPp5LaV4ySVeE9H2RlA/output.mp4", "https://replicate.delivery/pbxt/uArxMWer0foeGJJoqlLsMJjdmKhhPeOCTfcu3vJzVYekfhdUJA/output_enhanced.mp4" ], "started_at": "2024-04-10T02:47:30.853408Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/hhk03y18z1rgc0cerqjasn8mww", "cancel": "https://api.replicate.com/v1/predictions/hhk03y18z1rgc0cerqjasn8mww/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in
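If you want to keep the generated videos, the URLs in the "output" array above can be downloaded directly. A minimal sketch using only Python's standard library (the local filenames and the use of urllib are our own choices, and replicate.delivery URLs may stop working after a while, so download promptly):

import urllib.request

# Output URLs copied from the prediction above; pick any local filenames you like.
outputs = [
    "https://replicate.delivery/pbxt/uwxrIeW0uNW4YymMfbhE7wuvlbVXKbIPp5LaV4ySVeE9H2RlA/output.mp4",
    "https://replicate.delivery/pbxt/uArxMWer0foeGJJoqlLsMJjdmKhhPeOCTfcu3vJzVYekfhdUJA/output_enhanced.mp4",
]
for url in outputs:
    filename = url.rsplit("/", 1)[-1]          # "output.mp4", "output_enhanced.mp4"
    urllib.request.urlretrieve(url, filename)  # stdlib download, no extra dependencies
    print("saved", filename)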
Prediction
camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad · ID: vy5w1wn121rge0cerqn9sn4jfc · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created
Input
- seed: 1
- chunk: 24
- prompt: Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.
- enhance: false
- overlap: 8
- num_steps: 50
- num_frames: 24
- image_guidance: 9
- negative_prompt: ""
{ "seed": 1, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": false, "overlap": 8, "num_steps": 50, "num_frames": 24, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
  {
    input: {
      seed: 1,
      chunk: 24,
      prompt: "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      enhance: false,
      overlap: 8,
      num_steps: 50,
      num_frames: 24,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url());
//=> "http://example.com"

// To write the file to disk (this model outputs MP4 video; requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    input={
        "seed": 1,
        "chunk": 24,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": False,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 24,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad",
    "input": {
      "seed": 1,
      "chunk": 24,
      "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      "enhance": false,
      "overlap": 8,
      "num_steps": 50,
      "num_frames": 24,
      "image_guidance": 9,
      "negative_prompt": ""
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T02:53:36.756470Z", "created_at": "2024-04-10T02:51:12.016000Z", "data_removed": false, "error": null, "id": "vy5w1wn121rge0cerqn9sn4jfc", "input": { "seed": 1, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": false, "overlap": 8, "num_steps": 50, "num_frames": 24, "image_guidance": 9, "negative_prompt": "" }, "logs": "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:00:29 • 0:00:00 0.00it/s", "metrics": { "predict_time": 49.968931, "total_time": 144.74047 }, "output": [ "https://replicate.delivery/pbxt/d0Eyny2Dei2dRC3CU0a2ewOU3GFr8EB4GmtHkWNBkLtwE7oSA/output.mp4" ], "started_at": "2024-04-10T02:52:46.787539Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/vy5w1wn121rge0cerqn9sn4jfc", "cancel": "https://api.replicate.com/v1/predictions/vy5w1wn121rge0cerqn9sn4jfc/cancel" }, "version": "776168ae57868838756d0fb6406b8aecbff6d746be9f8d917e322158d8c8faad" }
Generated in
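The response also includes "urls" for checking on or canceling the prediction. Below is a sketch of polling the "get" URL from the JSON above with Python's standard library; the Bearer token is the same REPLICATE_API_TOKEN used in the cURL example, and this is just one way to poll, not the only one:

import json
import os
import urllib.request

# "urls.get" copied from the response above.
req = urllib.request.Request(
    "https://api.replicate.com/v1/predictions/vy5w1wn121rge0cerqn9sn4jfc",
    headers={"Authorization": "Bearer " + os.environ["REPLICATE_API_TOKEN"]},
)
with urllib.request.urlopen(req) as resp:
    prediction = json.load(resp)

print(prediction["status"])   # "succeeded" once the run is done
print(prediction["output"])   # list of video URLs, as in the JSON above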
Prediction
camenduru/streaming-t2v:ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96 · ID: gkfgk6rwf1rgp0cers2sk3s09r · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created
Input
- seed: 33
- prompt: Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.
- enhance: true
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96",
  {
    input: {
      seed: 33,
      prompt: "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      enhance: true,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url());
//=> "http://example.com"

// To write the file to disk (this model outputs MP4 video; requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96",
    input={
        "seed": 33,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": True,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96",
    "input": {
      "seed": 33,
      "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      "enhance": true,
      "num_steps": 50,
      "num_frames": 56,
      "image_guidance": 9,
      "negative_prompt": ""
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T04:37:34.217029Z", "created_at": "2024-04-10T04:30:01.848000Z", "data_removed": false, "error": null, "id": "gkfgk6rwf1rgp0cers2sk3s09r", "input": { "seed": 33, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 128 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 
'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:23 • 0:00:00 0.00it/s\n2024-04-10 04:35:05,319 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 04:37:31,267 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 309.690938, "total_time": 452.369029 }, "output": [ "https://replicate.delivery/pbxt/Ine4fu7JgsrLwEYeec5Xb6UqcyjAZgBJ7IEzYRq3uz40YyjKB/output.mp4", "https://replicate.delivery/pbxt/wnuzP6LIHP4QBlw95wlrsqG7xsfzQBDPmLcnycUcN96GTeoSA/output_enhanced.mp4" ], "started_at": "2024-04-10T04:32:24.526091Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/gkfgk6rwf1rgp0cers2sk3s09r", "cancel": "https://api.replicate.com/v1/predictions/gkfgk6rwf1rgp0cers2sk3s09r/cancel" }, "version": "ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96" }
Generated in
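Since each of these runs takes several minutes of predict time, you may prefer not to block on replicate.run. A rough sketch of starting the same prediction asynchronously with the Python client and polling it, assuming your installed replicate package exposes predictions.create() and Prediction.reload() as current releases do (check your version):

import time
import replicate

# Hedged sketch: start the prediction without blocking, then poll its status.
prediction = replicate.predictions.create(
    version="ce9e09771a164b46913a7006bfa6c602735d82b748e6e433acc27103fc973b96",
    input={
        "seed": 33,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": True,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": "",
    },
)

while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(10)          # runs above took roughly 5 minutes on an A100 (40GB)
    prediction.reload()     # refresh status/output from the API

print(prediction.status, prediction.output)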
Prediction
camenduru/streaming-t2v:eb3fcb5cd02651d0b54ac51547c4e862f6977c587098e8049ed41f5b3bf45294 · ID: 33n6tqw0r1rgp0cers8tsgqcfw · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created
Input
- seed: 33
- chunk: 24
- prompt: Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:eb3fcb5cd02651d0b54ac51547c4e862f6977c587098e8049ed41f5b3bf45294",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url());
//=> "http://example.com"

// To write the file to disk (this model outputs MP4 video; requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:eb3fcb5cd02651d0b54ac51547c4e862f6977c587098e8049ed41f5b3bf45294",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "eb3fcb5cd02651d0b54ac51547c4e862f6977c587098e8049ed41f5b3bf45294",
    "input": {
      "seed": 33,
      "chunk": 24,
      "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      "enhance": true,
      "overlap": 8,
      "num_steps": 50,
      "num_frames": 56,
      "image_guidance": 9,
      "negative_prompt": ""
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T04:50:46.949742Z", "created_at": "2024-04-10T04:43:33.952000Z", "data_removed": false, "error": null, "id": "33n6tqw0r1rgp0cers8tsgqcfw", "input": { "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 128 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 
'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:24 • 0:00:00 0.00it/s\n2024-04-10 04:48:20,346 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 04:50:46,155 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 307.229312, "total_time": 432.997742 }, "output": [ "https://replicate.delivery/pbxt/wt2ucmHM3XI3I9gU2jkFf4YGp1Fwoh4Celn3o0ruXcSmy8oSA/output.mp4", "https://replicate.delivery/pbxt/5YPFmQhuTYqfOKzpgp5OgdoteigVItpLgbZTAQFve5eaKzjKB/output_enhanced.mp4" ], "started_at": "2024-04-10T04:45:39.720430Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/33n6tqw0r1rgp0cers8tsgqcfw", "cancel": "https://api.replicate.com/v1/predictions/33n6tqw0r1rgp0cers8tsgqcfw/cancel" }, "version": "eb3fcb5cd02651d0b54ac51547c4e862f6977c587098e8049ed41f5b3bf45294" }
Generated in
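Note that with "enhance": true the "output" array above contains two files, the base render plus output_enhanced.mp4, while the earlier run with "enhance": false returned a single file. A small helper you could use to pick the enhanced version when it exists; the "enhanced" substring check is our own convention, based only on the filenames shown on this page:

# Pick the enhanced video when the prediction produced one, else fall back to the base render.
def pick_video(output: list[str]) -> str:
    enhanced = [url for url in output if "enhanced" in url]
    return enhanced[0] if enhanced else output[0]

print(pick_video([
    "https://replicate.delivery/pbxt/wt2ucmHM3XI3I9gU2jkFf4YGp1Fwoh4Celn3o0ruXcSmy8oSA/output.mp4",
    "https://replicate.delivery/pbxt/5YPFmQhuTYqfOKzpgp5OgdoteigVItpLgbZTAQFve5eaKzjKB/output_enhanced.mp4",
]))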
Prediction
camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf · ID: es10zwpm81rg80cersd80w878c · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created
Input
- seed: 33
- chunk: 24
- prompt: Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 56
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 56,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url());
//=> "http://example.com"

// To write the file to disk (this model outputs MP4 video; requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 56,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
    "input": {
      "seed": 33,
      "chunk": 24,
      "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.",
      "enhance": true,
      "overlap": 8,
      "num_steps": 50,
      "num_frames": 56,
      "image_guidance": 9,
      "negative_prompt": ""
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T05:09:16.984223Z", "created_at": "2024-04-10T04:53:45.152000Z", "data_removed": false, "error": null, "id": "es10zwpm81rg80cersd80w878c", "input": { "seed": 33, "chunk": 24, "prompt": "Dive into the depths of the ocean: explore vibrant coral reefs, mysterious underwater caves, and the mesmerizing creatures that call the sea home.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 56, "image_guidance": 9, "negative_prompt": "" }, "logs": "You are using a CUDA device ('NVIDIA A100-SXM4-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 
'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:234: UserWarning: predict returned None if it was on purpose, ignore this warning...\nself._warning_cache.warn(\"predict returned None if it was on purpose, ignore this warning...\")\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:02:25 • 0:00:00 0.00it/s\n2024-04-10 05:06:46,981 - modelscope - WARNING - task video-to-video input definition is missing\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")\n2024-04-10 05:09:16,498 - modelscope - WARNING - task video-to-video output keys are missing", "metrics": { "predict_time": 332.706564, "total_time": 931.832223 }, "output": [ "https://replicate.delivery/pbxt/8GtrwGiZJqJWIxDFzzjimIb7vwBZF97ScaijkU9vz1NfheoSA/output.mp4", "https://replicate.delivery/pbxt/ur5ZA0bGNKbeGqI0fjIKz8HSPam6VIqisT13KbJWebT4H6RlA/output_enhanced.mp4" ], "started_at": "2024-04-10T05:03:44.277659Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/es10zwpm81rg80cersd80w878c", "cancel": "https://api.replicate.com/v1/predictions/es10zwpm81rg80cersd80w878c/cancel" }, "version": "1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf" }
Generated in
Prediction
camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf · ID: j81wsngpn1rgc0cerspazkgs6m · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created
Input
- seed: 33
- chunk: 24
- prompt: Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.
- enhance: true
- overlap: 8
- num_steps: 50
- num_frames: 120
- image_guidance: 9
- negative_prompt: ""
{ "seed": 33, "chunk": 24, "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 120, "image_guidance": 9, "negative_prompt": "" }
Install Replicate’s Node.js client library:npm install replicate
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
  {
    input: {
      seed: 33,
      chunk: 24,
      prompt: "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.",
      enhance: true,
      overlap: 8,
      num_steps: 50,
      num_frames: 120,
      image_guidance: 9,
      negative_prompt: ""
    }
  }
);

// To access the file URL:
console.log(output[0].url());
//=> "http://example.com"

// To write the file to disk (this model outputs MP4 video; requires: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Import the client:import replicate
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "camenduru/streaming-t2v:1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
    input={
        "seed": 33,
        "chunk": 24,
        "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.",
        "enhance": True,
        "overlap": 8,
        "num_steps": 50,
        "num_frames": 120,
        "image_guidance": 9,
        "negative_prompt": ""
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Run camenduru/streaming-t2v using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf",
    "input": {
      "seed": 33,
      "chunk": 24,
      "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.",
      "enhance": true,
      "overlap": 8,
      "num_steps": 50,
      "num_frames": 120,
      "image_guidance": 9,
      "negative_prompt": ""
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2024-04-10T05:21:57.916845Z", "created_at": "2024-04-10T05:12:36.264000Z", "data_removed": false, "error": null, "id": "j81wsngpn1rgc0cerspazkgs6m", "input": { "seed": 33, "chunk": 24, "prompt": "Experience the dance of jellyfish: float through mesmerizing swarms of jellyfish, pulsating with otherworldly grace and beauty.", "enhance": true, "overlap": 8, "num_steps": 50, "num_frames": 120, "image_guidance": 9, "negative_prompt": "" }, "logs": "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n/usr/local/lib/python3.10/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, predict_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 48 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\nrank_zero_warn(\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 
'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nINFERENCE PARAMS = {'concat_video': True, 'conditioning_from_all_past': False, 'conditioning_type': 'fixed', 'eta': 1.0, 'eval_loss_metrics': False, 'frame_rate': 8, 'guidance_scale': 7.5, 'height': 256, 'mode': 'long_video', 'n_autoregressive_generations': 4, 
'negative_prompt': '', 'num_inference_steps': 50, 'result_formats': ['eval_mp4'], 'scheduler_cls': '', 'seed': 33, 'start_from_real_input': False, 'use_dec_scaling': True, 'validation_samples': 80, 'video_length': 16, 'width': 256}\nPredicting ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 0:06:16 • 0:00:00 0.00it/s\n/usr/local/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:608: UserWarning: Should have tb<=t1 but got tb=4.164773464202881 and t1=4.164773.\nwarnings.warn(f\"Should have {tb_name}<=t1 but got {tb_name}={tb} and t1={self._end}.\")", "metrics": { "predict_time": 561.635751, "total_time": 561.652845 }, "output": [ "https://replicate.delivery/pbxt/DmwNVZX9sd41EZZpO5cf8GgS4wtVs9WM0DEncQdvsSe1P9oSA/output.mp4", "https://replicate.delivery/pbxt/TGxCwYt5I16LLhxOpUUEpDio8RfOofpKwV281XDz9921P9oSA/output_enhanced.mp4" ], "started_at": "2024-04-10T05:12:36.281094Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/j81wsngpn1rgc0cerspazkgs6m", "cancel": "https://api.replicate.com/v1/predictions/j81wsngpn1rgc0cerspazkgs6m/cancel" }, "version": "1fe245aad4bb7f209074a231142ac3eceb3b1f2adc9cf77b46e8ffa2662323cf" }
Want to make some of these yourself?
Run this model