Example input:

{
  "batch_size": "7",
  "dataset_zip": "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
  "epoch": 80,
  "f0method": "rmvpe_gpu",
  "sample_rate": "48k",
  "version": "v2"
}

Install Replicate's Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_QC7**********************************
This is your API token. Keep it to yourself.
Import and set up the client:

import Replicate from "replicate";
import fs from "node:fs";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run replicate/train-rvc-model using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "replicate/train-rvc-model:cf360587a27f67500c30fc31de1e0f0f9aa26dcd7b866e6ac937a07bd104bad9",
  {
    input: {
      batch_size: "7",
      dataset_zip: "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
      epoch: 80,
      f0method: "rmvpe_gpu",
      sample_rate: "48k",
      version: "v2"
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"

// To write the file to disk (this model's output is a zip archive):
await fs.promises.writeFile("sam_altman.zip", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_QC7**********************************
This is your API token. Keep it to yourself.
Import the client:

import replicate
Run replicate/train-rvc-model using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "replicate/train-rvc-model:cf360587a27f67500c30fc31de1e0f0f9aa26dcd7b866e6ac937a07bd104bad9",
    input={
        "batch_size": "7",
        "dataset_zip": "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
        "epoch": 80,
        "f0method": "rmvpe_gpu",
        "sample_rate": "48k",
        "version": "v2"
    }
)
# To access the file URL:
print(output.url())
#=> "http://example.com"

# To write the file to disk (this model's output is a zip archive):
with open("sam_altman.zip", "wb") as file:
    file.write(output.read())
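Per the training logs below, the archive bundles the files RVC needs for inference: the trained weights (sam_altman.pth), a retrieval index (added_IVF1079_Flat_nprobe_1_sam_altman_v2.index), and the feature file (total_fea.npy). A minimal sketch for unpacking it, assuming you saved the archive as sam_altman.zip above:

import zipfile

# Extract the trained model files from the downloaded archive.
with zipfile.ZipFile("sam_altman.zip") as archive:
    print(archive.namelist())  # the .pth, added_*.index, and total_*.npy files
    archive.extractall("sam_altman_model")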
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_QC7**********************************
This is your API token. Keep it to yourself.
Run replicate/train-rvc-model using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "replicate/train-rvc-model:cf360587a27f67500c30fc31de1e0f0f9aa26dcd7b866e6ac937a07bd104bad9",
    "input": {
      "batch_size": "7",
      "dataset_zip": "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
      "epoch": 80,
      "f0method": "rmvpe_gpu",
      "sample_rate": "48k",
      "version": "v2"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
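The Prefer: wait header asks the API to hold the response open until the prediction finishes, but only for a short, bounded time; a twenty-minute training run will outlive it, so in practice you poll the prediction's get URL until status becomes succeeded. A minimal sketch of that flow with the Python client, assuming the same inputs as above:

import replicate

# Create the prediction without blocking on the result, then poll
# until it completes and inspect the full prediction object.
prediction = replicate.predictions.create(
    version="cf360587a27f67500c30fc31de1e0f0f9aa26dcd7b866e6ac937a07bd104bad9",
    input={
        "batch_size": "7",
        "dataset_zip": "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
        "epoch": 80,
        "f0method": "rmvpe_gpu",
        "sample_rate": "48k",
        "version": "v2"
    },
)
prediction.wait()           # polls the get URL until the prediction finishes
print(prediction.status)    # "succeeded" on success
print(prediction.output)    # URL of the trained model zip

A completed prediction for this training run looks like this: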
{
  "id": "opl6jylbuhb54lskbcuaj7dxfi",
  "model": "replicate/train-rvc-model",
  "version": "cf360587a27f67500c30fc31de1e0f0f9aa26dcd7b866e6ac937a07bd104bad9",
  "input": {
    "batch_size": "7",
    "dataset_zip": "https://replicate.delivery/pbxt/Jve3yEeLYIoklA2qhn8uguIBZvcFNLotV503kIrURbBOAoNU/dataset_sam_altman.zip",
    "epoch": 80,
    "f0method": "rmvpe_gpu",
    "sample_rate": "48k",
    "version": "v2"
  },
"logs": "Current working directory: /src\nBase path: dataset\npython infer/modules/train/preprocess.py 'dataset/sam_altman' 48000 2 './logs/sam_altman' False 3.0\n['infer/modules/train/preprocess.py', 'dataset/sam_altman', '48000', '2', './logs/sam_altman', 'False', '3.0']\nstart preprocess\n['infer/modules/train/preprocess.py', 'dataset/sam_altman', '48000', '2', './logs/sam_altman', 'False', '3.0']\ndataset/sam_altman/split_1.wav->Suc.\ndataset/sam_altman/split_0.wav->Suc.\ndataset/sam_altman/split_100.wav->Suc.\ndataset/sam_altman/split_102.wav->Suc.\ndataset/sam_altman/split_10.wav->Suc.\ndataset/sam_altman/split_104.wav->Suc.\ndataset/sam_altman/split_101.wav->Suc.\ndataset/sam_altman/split_106.wav->Suc.\ndataset/sam_altman/split_103.wav->Suc.\ndataset/sam_altman/split_108.wav->Suc.\ndataset/sam_altman/split_105.wav->Suc.\ndataset/sam_altman/split_12.wav->Suc.\ndataset/sam_altman/split_107.wav->Suc.\ndataset/sam_altman/split_14.wav->Suc.\ndataset/sam_altman/split_11.wav->Suc.\ndataset/sam_altman/split_13.wav->Suc.\ndataset/sam_altman/split_16.wav->Suc.\ndataset/sam_altman/split_15.wav->Suc.\ndataset/sam_altman/split_18.wav->Suc.\ndataset/sam_altman/split_17.wav->Suc.\ndataset/sam_altman/split_2.wav->Suc.\ndataset/sam_altman/split_19.wav->Suc.\ndataset/sam_altman/split_21.wav->Suc.\ndataset/sam_altman/split_20.wav->Suc.\ndataset/sam_altman/split_23.wav->Suc.\ndataset/sam_altman/split_22.wav->Suc.\ndataset/sam_altman/split_25.wav->Suc.\ndataset/sam_altman/split_24.wav->Suc.\ndataset/sam_altman/split_26.wav->Suc.\ndataset/sam_altman/split_27.wav->Suc.\ndataset/sam_altman/split_29.wav->Suc.\ndataset/sam_altman/split_28.wav->Suc.\ndataset/sam_altman/split_3.wav->Suc.\ndataset/sam_altman/split_31.wav->Suc.\ndataset/sam_altman/split_30.wav->Suc.\ndataset/sam_altman/split_33.wav->Suc.\ndataset/sam_altman/split_32.wav->Suc.\ndataset/sam_altman/split_35.wav->Suc.\ndataset/sam_altman/split_37.wav->Suc.\ndataset/sam_altman/split_34.wav->Suc.\ndataset/sam_altman/split_39.wav->Suc.\ndataset/sam_altman/split_40.wav->Suc.\ndataset/sam_altman/split_36.wav->Suc.\ndataset/sam_altman/split_38.wav->Suc.\ndataset/sam_altman/split_42.wav->Suc.\ndataset/sam_altman/split_44.wav->Suc.\ndataset/sam_altman/split_4.wav->Suc.\ndataset/sam_altman/split_41.wav->Suc.\ndataset/sam_altman/split_46.wav->Suc.\ndataset/sam_altman/split_43.wav->Suc.\ndataset/sam_altman/split_48.wav->Suc.\ndataset/sam_altman/split_45.wav->Suc.\ndataset/sam_altman/split_47.wav->Suc.\ndataset/sam_altman/split_5.wav->Suc.\ndataset/sam_altman/split_49.wav->Suc.\ndataset/sam_altman/split_51.wav->Suc.\ndataset/sam_altman/split_50.wav->Suc.\ndataset/sam_altman/split_53.wav->Suc.\ndataset/sam_altman/split_55.wav->Suc.\ndataset/sam_altman/split_57.wav->Suc.\ndataset/sam_altman/split_52.wav->Suc.\ndataset/sam_altman/split_54.wav->Suc.\ndataset/sam_altman/split_59.wav->Suc.\ndataset/sam_altman/split_60.wav->Suc.\ndataset/sam_altman/split_56.wav->Suc.\ndataset/sam_altman/split_62.wav->Suc.\ndataset/sam_altman/split_58.wav->Suc.\ndataset/sam_altman/split_64.wav->Suc.\ndataset/sam_altman/split_6.wav->Suc.\ndataset/sam_altman/split_66.wav->Suc.\ndataset/sam_altman/split_61.wav->Suc.\ndataset/sam_altman/split_63.wav->Suc.\ndataset/sam_altman/split_68.wav->Suc.\ndataset/sam_altman/split_65.wav->Suc.\ndataset/sam_altman/split_7.wav->Suc.\ndataset/sam_altman/split_67.wav->Suc.\ndataset/sam_altman/split_71.wav->Suc.\ndataset/sam_altman/split_69.wav->Suc.\ndataset/sam_altman/split_73.wav->Suc.\ndataset/sam_altman/split_70.wav->Suc.\ndataset/sam_altman/split_72.w
av->Suc.\ndataset/sam_altman/split_74.wav->Suc.\ndataset/sam_altman/split_75.wav->Suc.\ndataset/sam_altman/split_76.wav->Suc.\ndataset/sam_altman/split_77.wav->Suc.\ndataset/sam_altman/split_78.wav->Suc.\ndataset/sam_altman/split_8.wav->Suc.\ndataset/sam_altman/split_79.wav->Suc.\ndataset/sam_altman/split_81.wav->Suc.\ndataset/sam_altman/split_83.wav->Suc.\ndataset/sam_altman/split_80.wav->Suc.\ndataset/sam_altman/split_85.wav->Suc.\ndataset/sam_altman/split_82.wav->Suc.\ndataset/sam_altman/split_84.wav->Suc.\ndataset/sam_altman/split_86.wav->Suc.\ndataset/sam_altman/split_87.wav->Suc.\ndataset/sam_altman/split_88.wav->Suc.\ndataset/sam_altman/split_9.wav->Suc.\ndataset/sam_altman/split_89.wav->Suc.\ndataset/sam_altman/split_91.wav->Suc.\ndataset/sam_altman/split_90.wav->Suc.\ndataset/sam_altman/split_93.wav->Suc.\ndataset/sam_altman/split_92.wav->Suc.\ndataset/sam_altman/split_94.wav->Suc.\ndataset/sam_altman/split_95.wav->Suc.\ndataset/sam_altman/split_96.wav->Suc.\ndataset/sam_altman/split_97.wav->Suc.\ndataset/sam_altman/split_99.wav->Suc.\ndataset/sam_altman/split_98.wav->Suc.\nend preprocess\nOutput: None\npython infer/modules/train/extract/extract_f0_rmvpe.py 1 0 0 './logs/sam_altman' True\n['infer/modules/train/extract/extract_f0_rmvpe.py', '1', '0', '0', './logs/sam_altman', 'True']\ntodo-f0-333\nf0ing,now-0,all-333,-./logs/sam_altman/1_16k_wavs/0_0.wav\nLoading rmvpe model\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML\nwarnings.warn(\"Can't initialize NVML\")\nf0ing,now-66,all-333,-./logs/sam_altman/1_16k_wavs/1_3.wav\nf0ing,now-132,all-333,-./logs/sam_altman/1_16k_wavs/39_2.wav\nf0ing,now-198,all-333,-./logs/sam_altman/1_16k_wavs/58_5.wav\nf0ing,now-264,all-333,-./logs/sam_altman/1_16k_wavs/79_2.wav\nf0ing,now-330,all-333,-./logs/sam_altman/1_16k_wavs/9_0.wav\nOutput: None\npython infer/modules/train/extract_feature_print.py cuda:0 1 0 0 './logs/sam_altman' 'v2'\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML\nwarnings.warn(\"Can't initialize NVML\")\n['infer/modules/train/extract_feature_print.py', 'cuda:0', '1', '0', '0', './logs/sam_altman', 'v2']\n./logs/sam_altman\nload model(s) from assets/hubert/hubert_base.pt\n2023-11-23 19:51:49 | INFO | fairseq.tasks.hubert_pretraining | current directory is /src\n2023-11-23 19:51:49 | INFO | fairseq.tasks.hubert_pretraining | HubertPretrainingTask Config {'_name': 'hubert_pretraining', 'data': 'metadata', 'fine_tuning': False, 'labels': ['km'], 'label_dir': 'label', 'label_rate': 50.0, 'sample_rate': 16000, 'normalize': False, 'enable_padding': False, 'max_keep_size': None, 'max_sample_size': 250000, 'min_sample_size': 32000, 'single_target': False, 'random_crop': True, 'pad_audio': False}\n2023-11-23 19:51:49 | INFO | fairseq.models.hubert.hubert | HubertModel Config: {'_name': 'hubert', 'label_rate': 50.0, 'extractor_mode': default, 'encoder_layers': 12, 'encoder_embed_dim': 768, 'encoder_ffn_embed_dim': 3072, 'encoder_attention_heads': 12, 'activation_fn': gelu, 'layer_type': transformer, 'dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'encoder_layerdrop': 0.05, 'dropout_input': 0.1, 'dropout_features': 0.1, 'final_dim': 256, 'untie_final_proj': True, 'layer_norm_first': False, 'conv_feature_layers': '[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2', 'conv_bias': False, 'logit_temp': 0.1, 'target_glu': False, 'feature_grad_mult': 0.1, 'mask_length': 10, 
'mask_prob': 0.8, 'mask_selection': static, 'mask_other': 0.0, 'no_mask_overlap': False, 'mask_min_space': 1, 'mask_channel_length': 10, 'mask_channel_prob': 0.0, 'mask_channel_selection': static, 'mask_channel_other': 0.0, 'no_mask_channel_overlap': False, 'mask_channel_min_space': 1, 'conv_pos': 128, 'conv_pos_groups': 16, 'latent_temp': [2.0, 0.5, 0.999995], 'skip_masked': False, 'skip_nomask': False, 'checkpoint_activations': False, 'required_seq_len_multiple': 2, 'depthwise_conv_kernel_size': 31, 'attn_type': '', 'pos_enc_type': 'abs', 'fp16': False}\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\nwarnings.warn(\"torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\")\nmove model to cuda\nall-feature-333\nnow-333,all-0,0_0.wav,(149, 768)\nnow-333,all-33,10_0.wav,(149, 768)\nnow-333,all-66,1_3.wav,(75, 768)\nnow-333,all-99,30_0.wav,(149, 768)\nnow-333,all-132,39_2.wav,(149, 768)\nnow-333,all-165,4_5.wav,(36, 768)\nnow-333,all-198,58_5.wav,(66, 768)\nnow-333,all-231,6_1.wav,(149, 768)\nnow-333,all-264,79_2.wav,(96, 768)\nnow-333,all-297,89_1.wav,(131, 768)\nnow-333,all-330,9_0.wav,(149, 768)\nall-feature-done\nOutput: None\n(42097, 768),1079\n(42097, 768),1079\ntraining\n(42097, 768),1079\ntraining\nadding\nWrite filelist done\nUse gpus: 0\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML\nwarnings.warn(\"Can't initialize NVML\")\nINFO:sam_altman:{'data': {'filter_length': 2048, 'hop_length': 480, 'max_wav_value': 32768.0, 'mel_fmax': None, 'mel_fmin': 0.0, 'n_mel_channels': 128, 'sampling_rate': 48000, 'win_length': 2048, 'training_files': './logs/sam_altman/filelist.txt'}, 'model': {'filter_channels': 768, 'gin_channels': 256, 'hidden_channels': 192, 'inter_channels': 192, 'kernel_size': 3, 'n_heads': 2, 'n_layers': 6, 'p_dropout': 0, 'resblock': '1', 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'resblock_kernel_sizes': [3, 7, 11], 'spk_embed_dim': 109, 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [24, 20, 4, 4], 'upsample_rates': [12, 10, 2, 2], 'use_spectral_norm': False}, 'train': {'batch_size': 7, 'betas': [0.8, 0.99], 'c_kl': 1.0, 'c_mel': 45, 'epochs': 20000, 'eps': 1e-09, 'fp16_run': True, 'init_lr_ratio': 1, 'learning_rate': 0.0001, 'log_interval': 200, 'lr_decay': 0.999875, 'seed': 1234, 'segment_size': 17280, 'warmup_epochs': 0}, 'model_dir': './logs/sam_altman', 'experiment_dir': './logs/sam_altman', 'save_every_epoch': 50, 'name': 'sam_altman', 'total_epoch': 80, 'pretrainG': 'assets/pretrained_v2/f0G48k.pth', 'pretrainD': 'assets/pretrained_v2/f0D48k.pth', 'version': 'v2', 'gpus': '0', 'sample_rate': '48k', 'if_f0': 1, 'if_latest': 1, 'save_every_weights': '0', 'if_cache_data_in_gpu': 1}\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML\nwarnings.warn(\"Can't initialize NVML\")\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\nwarnings.warn(\"torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\")\nDEBUG:infer.lib.infer_pack.models:gin_channels: 256, self.spk_embed_dim: 109\nINFO:sam_altman:loaded pretrained 
assets/pretrained_v2/f0G48k.pth\nINFO:sam_altman:<All keys matched successfully>\nINFO:sam_altman:loaded pretrained assets/pretrained_v2/f0D48k.pth\nINFO:sam_altman:<All keys matched successfully>\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\nNote: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\nreturn _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\nNote: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\nreturn _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\nNote: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\nreturn _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\nNote: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\nreturn _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\nNote: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\nreturn _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torch/autograd/__init__.py:251: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed. 
This is not an error, but may impair performance.\ngrad.sizes() = [64, 1, 4], strides() = [4, 1, 1]\nbucket_view.sizes() = [64, 1, 4], strides() = [4, 4, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:320.)\nVariable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\nINFO:sam_altman:Train Epoch: 1 [0%]\nINFO:sam_altman:[0, 0.0001]\nINFO:sam_altman:loss_disc=4.172, loss_gen=3.120, loss_fm=8.932,loss_mel=27.330, loss_kl=9.000\nDEBUG:matplotlib:matplotlib data path: /root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/matplotlib/mpl-data\nDEBUG:matplotlib:CONFIGDIR=/root/.config/matplotlib\nDEBUG:matplotlib:interactive is False\nDEBUG:matplotlib:platform is linux\nINFO:sam_altman:====> Epoch: 1 [2023-11-23 19:52:46] | (0:00:18.492362)\nINFO:sam_altman:====> Epoch: 2 [2023-11-23 19:53:01] | (0:00:14.058839)\nINFO:sam_altman:====> Epoch: 3 [2023-11-23 19:53:15] | (0:00:14.087264)\nINFO:sam_altman:====> Epoch: 4 [2023-11-23 19:53:29] | (0:00:14.048906)\nINFO:sam_altman:Train Epoch: 5 [20%]\nINFO:sam_altman:[200, 9.995000937421877e-05]\nINFO:sam_altman:loss_disc=3.921, loss_gen=3.317, loss_fm=7.828,loss_mel=18.033, loss_kl=1.945\nINFO:sam_altman:====> Epoch: 5 [2023-11-23 19:53:43] | (0:00:14.331246)\nINFO:sam_altman:====> Epoch: 6 [2023-11-23 19:53:57] | (0:00:14.016381)\nINFO:sam_altman:====> Epoch: 7 [2023-11-23 19:54:11] | (0:00:14.015517)\nINFO:sam_altman:====> Epoch: 8 [2023-11-23 19:54:25] | (0:00:14.132970)\nINFO:sam_altman:Train Epoch: 9 [69%]\nINFO:sam_altman:[400, 9.990004373906418e-05]\nINFO:sam_altman:loss_disc=3.705, loss_gen=3.479, loss_fm=8.874,loss_mel=17.079, loss_kl=1.697\nINFO:sam_altman:====> Epoch: 9 [2023-11-23 19:54:39] | (0:00:14.305179)\nINFO:sam_altman:====> Epoch: 10 [2023-11-23 19:54:53] | (0:00:14.000367)\nINFO:sam_altman:====> Epoch: 11 [2023-11-23 19:55:07] | (0:00:13.998400)\nINFO:sam_altman:====> Epoch: 12 [2023-11-23 19:55:21] | (0:00:14.007730)\nINFO:sam_altman:Train Epoch: 13 [24%]\nINFO:sam_altman:[600, 9.98501030820433e-05]\nINFO:sam_altman:loss_disc=3.772, loss_gen=3.694, loss_fm=9.682,loss_mel=16.714, loss_kl=1.909\nINFO:sam_altman:====> Epoch: 13 [2023-11-23 19:55:36] | (0:00:14.299253)\nINFO:sam_altman:====> Epoch: 14 [2023-11-23 19:55:50] | (0:00:14.096318)\nINFO:sam_altman:====> Epoch: 15 [2023-11-23 19:56:04] | (0:00:13.995778)\nINFO:sam_altman:====> Epoch: 16 [2023-11-23 19:56:18] | (0:00:14.167015)\nINFO:sam_altman:Train Epoch: 17 [0%]\nINFO:sam_altman:[800, 9.980018739066937e-05]\nINFO:sam_altman:loss_disc=4.066, loss_gen=3.559, loss_fm=8.139,loss_mel=18.515, loss_kl=1.488\nINFO:sam_altman:====> Epoch: 17 [2023-11-23 19:56:32] | (0:00:14.315985)\nINFO:sam_altman:====> Epoch: 18 [2023-11-23 19:56:46] | (0:00:14.002399)\nINFO:sam_altman:====> Epoch: 19 [2023-11-23 19:57:00] | (0:00:14.019100)\nINFO:sam_altman:====> Epoch: 20 [2023-11-23 19:57:14] | (0:00:14.007005)\nINFO:sam_altman:Train Epoch: 21 [20%]\nINFO:sam_altman:[1000, 9.975029665246193e-05]\nINFO:sam_altman:loss_disc=3.879, loss_gen=3.444, loss_fm=9.087,loss_mel=16.523, loss_kl=1.996\nINFO:sam_altman:====> Epoch: 21 [2023-11-23 19:57:29] | (0:00:14.307051)\nINFO:sam_altman:====> Epoch: 22 [2023-11-23 19:57:43] | (0:00:13.993078)\nINFO:sam_altman:====> Epoch: 23 [2023-11-23 19:57:57] | (0:00:14.114252)\nINFO:sam_altman:====> Epoch: 24 [2023-11-23 19:58:11] | (0:00:13.993609)\nINFO:sam_altman:Train Epoch: 25 [96%]\nINFO:sam_altman:[1200, 9.970043085494672e-05]\nINFO:sam_altman:loss_disc=3.763, loss_gen=3.675, 
loss_fm=8.664,loss_mel=16.108, loss_kl=0.825\nINFO:sam_altman:====> Epoch: 25 [2023-11-23 19:58:25] | (0:00:14.274665)\nINFO:sam_altman:====> Epoch: 26 [2023-11-23 19:58:39] | (0:00:14.000343)\nINFO:sam_altman:====> Epoch: 27 [2023-11-23 19:58:53] | (0:00:13.992903)\nINFO:sam_altman:====> Epoch: 28 [2023-11-23 19:59:07] | (0:00:14.003525)\nINFO:sam_altman:Train Epoch: 29 [94%]\nINFO:sam_altman:[1400, 9.965058998565574e-05]\nINFO:sam_altman:loss_disc=3.845, loss_gen=3.284, loss_fm=9.413,loss_mel=17.019, loss_kl=1.318\nINFO:sam_altman:====> Epoch: 29 [2023-11-23 19:59:21] | (0:00:14.285749)\nINFO:sam_altman:====> Epoch: 30 [2023-11-23 19:59:35] | (0:00:14.010255)\nINFO:sam_altman:====> Epoch: 31 [2023-11-23 19:59:49] | (0:00:14.013091)\nINFO:sam_altman:====> Epoch: 32 [2023-11-23 20:00:04] | (0:00:14.118040)\nINFO:sam_altman:Train Epoch: 33 [0%]\nINFO:sam_altman:[1600, 9.960077403212722e-05]\nINFO:sam_altman:loss_disc=3.623, loss_gen=3.782, loss_fm=9.977,loss_mel=17.460, loss_kl=1.435\nINFO:sam_altman:====> Epoch: 33 [2023-11-23 20:00:18] | (0:00:14.325803)\nINFO:sam_altman:====> Epoch: 34 [2023-11-23 20:00:32] | (0:00:13.998874)\nINFO:sam_altman:====> Epoch: 35 [2023-11-23 20:00:46] | (0:00:14.018591)\nINFO:sam_altman:====> Epoch: 36 [2023-11-23 20:01:00] | (0:00:14.013491)\nINFO:sam_altman:Train Epoch: 37 [65%]\nINFO:sam_altman:[1800, 9.95509829819056e-05]\nINFO:sam_altman:loss_disc=3.833, loss_gen=3.036, loss_fm=6.822,loss_mel=17.565, loss_kl=0.938\nINFO:sam_altman:====> Epoch: 37 [2023-11-23 20:01:14] | (0:00:14.308932)\nINFO:sam_altman:====> Epoch: 38 [2023-11-23 20:01:28] | (0:00:14.012427)\nINFO:sam_altman:====> Epoch: 39 [2023-11-23 20:01:42] | (0:00:14.004681)\nINFO:sam_altman:====> Epoch: 40 [2023-11-23 20:01:56] | (0:00:14.136796)\nINFO:sam_altman:Train Epoch: 41 [4%]\nINFO:sam_altman:[2000, 9.950121682254156e-05]\nINFO:sam_altman:loss_disc=3.531, loss_gen=3.865, loss_fm=9.413,loss_mel=16.917, loss_kl=1.230\nINFO:sam_altman:====> Epoch: 41 [2023-11-23 20:02:11] | (0:00:14.295757)\nINFO:sam_altman:====> Epoch: 42 [2023-11-23 20:02:25] | (0:00:14.027522)\nINFO:sam_altman:====> Epoch: 43 [2023-11-23 20:02:39] | (0:00:14.005888)\nINFO:sam_altman:====> Epoch: 44 [2023-11-23 20:02:53] | (0:00:14.028138)\nINFO:sam_altman:Train Epoch: 45 [0%]\nINFO:sam_altman:[2200, 9.945147554159202e-05]\nINFO:sam_altman:loss_disc=3.822, loss_gen=3.635, loss_fm=6.555,loss_mel=15.532, loss_kl=1.205\nINFO:sam_altman:====> Epoch: 45 [2023-11-23 20:03:07] | (0:00:14.322628)\nINFO:sam_altman:====> Epoch: 46 [2023-11-23 20:03:21] | (0:00:14.005216)\nINFO:sam_altman:====> Epoch: 47 [2023-11-23 20:03:35] | (0:00:14.133467)\nINFO:sam_altman:====> Epoch: 48 [2023-11-23 20:03:49] | (0:00:14.016325)\nINFO:sam_altman:Train Epoch: 49 [67%]\nINFO:sam_altman:[2400, 9.940175912662009e-05]\nINFO:sam_altman:loss_disc=3.923, loss_gen=3.040, loss_fm=6.919,loss_mel=15.679, loss_kl=1.050\nINFO:sam_altman:====> Epoch: 49 [2023-11-23 20:04:04] | (0:00:14.310548)\nINFO:sam_altman:Saving model and optimizer state at epoch 50 to ./logs/sam_altman/G_2333333.pth\nINFO:sam_altman:Saving model and optimizer state at epoch 50 to ./logs/sam_altman/D_2333333.pth\nINFO:sam_altman:====> Epoch: 50 [2023-11-23 20:04:18] | (0:00:14.870339)\nINFO:sam_altman:====> Epoch: 51 [2023-11-23 20:04:33] | (0:00:14.079238)\nINFO:sam_altman:====> Epoch: 52 [2023-11-23 20:04:47] | (0:00:14.035602)\nINFO:sam_altman:====> Epoch: 53 [2023-11-23 20:05:01] | (0:00:14.023377)\nINFO:sam_altman:Train Epoch: 54 [12%]\nINFO:sam_altman:[2600, 
9.933964855674948e-05]\nINFO:sam_altman:loss_disc=3.802, loss_gen=3.355, loss_fm=9.064,loss_mel=16.333, loss_kl=1.413\nINFO:sam_altman:====> Epoch: 54 [2023-11-23 20:05:15] | (0:00:14.402449)\nINFO:sam_altman:====> Epoch: 55 [2023-11-23 20:05:29] | (0:00:13.988542)\nINFO:sam_altman:====> Epoch: 56 [2023-11-23 20:05:43] | (0:00:14.031654)\nINFO:sam_altman:====> Epoch: 57 [2023-11-23 20:05:57] | (0:00:14.064693)\nINFO:sam_altman:Train Epoch: 58 [71%]\nINFO:sam_altman:[2800, 9.928998804478705e-05]\nINFO:sam_altman:loss_disc=3.404, loss_gen=3.825, loss_fm=9.404,loss_mel=15.319, loss_kl=0.330\nINFO:sam_altman:====> Epoch: 58 [2023-11-23 20:06:11] | (0:00:14.307631)\nINFO:sam_altman:====> Epoch: 59 [2023-11-23 20:06:25] | (0:00:14.009880)\nINFO:sam_altman:====> Epoch: 60 [2023-11-23 20:06:39] | (0:00:14.011763)\nINFO:sam_altman:====> Epoch: 61 [2023-11-23 20:06:54] | (0:00:14.023478)\nINFO:sam_altman:Train Epoch: 62 [80%]\nINFO:sam_altman:[3000, 9.924035235842533e-05]\nINFO:sam_altman:loss_disc=3.725, loss_gen=3.785, loss_fm=9.929,loss_mel=15.898, loss_kl=1.236\nINFO:sam_altman:====> Epoch: 62 [2023-11-23 20:07:08] | (0:00:14.471969)\nINFO:sam_altman:====> Epoch: 63 [2023-11-23 20:07:22] | (0:00:14.033923)\nINFO:sam_altman:====> Epoch: 64 [2023-11-23 20:07:36] | (0:00:14.029050)\nINFO:sam_altman:====> Epoch: 65 [2023-11-23 20:07:50] | (0:00:14.032673)\nINFO:sam_altman:Train Epoch: 66 [55%]\nINFO:sam_altman:[3200, 9.919074148525384e-05]\nINFO:sam_altman:loss_disc=3.888, loss_gen=3.578, loss_fm=9.723,loss_mel=16.500, loss_kl=1.436\nINFO:sam_altman:====> Epoch: 66 [2023-11-23 20:08:04] | (0:00:14.293031)\nINFO:sam_altman:====> Epoch: 67 [2023-11-23 20:08:18] | (0:00:14.006002)\nINFO:sam_altman:====> Epoch: 68 [2023-11-23 20:08:32] | (0:00:13.997145)\nINFO:sam_altman:====> Epoch: 69 [2023-11-23 20:08:46] | (0:00:14.003798)\nINFO:sam_altman:Train Epoch: 70 [16%]\nINFO:sam_altman:[3400, 9.914115541286833e-05]\nINFO:sam_altman:loss_disc=3.674, loss_gen=3.389, loss_fm=8.772,loss_mel=15.365, loss_kl=0.968\nINFO:sam_altman:====> Epoch: 70 [2023-11-23 20:09:01] | (0:00:14.416095)\nINFO:sam_altman:====> Epoch: 71 [2023-11-23 20:09:15] | (0:00:13.989245)\nINFO:sam_altman:====> Epoch: 72 [2023-11-23 20:09:29] | (0:00:13.990721)\nINFO:sam_altman:====> Epoch: 73 [2023-11-23 20:09:43] | (0:00:13.987562)\nINFO:sam_altman:Train Epoch: 74 [39%]\nINFO:sam_altman:[3600, 9.909159412887068e-05]\nINFO:sam_altman:loss_disc=3.890, loss_gen=2.895, loss_fm=7.118,loss_mel=14.766, loss_kl=0.778\nINFO:sam_altman:====> Epoch: 74 [2023-11-23 20:09:57] | (0:00:14.276048)\nINFO:sam_altman:====> Epoch: 75 [2023-11-23 20:10:11] | (0:00:14.009208)\nINFO:sam_altman:====> Epoch: 76 [2023-11-23 20:10:25] | (0:00:13.998905)\nINFO:sam_altman:====> Epoch: 77 [2023-11-23 20:10:39] | (0:00:14.010387)\nINFO:sam_altman:Train Epoch: 78 [71%]\nINFO:sam_altman:[3800, 9.904205762086905e-05]\nINFO:sam_altman:loss_disc=3.256, loss_gen=3.944, loss_fm=10.533,loss_mel=15.718, loss_kl=1.287\nINFO:sam_altman:====> Epoch: 78 [2023-11-23 20:10:54] | (0:00:14.404643)\nINFO:sam_altman:====> Epoch: 79 [2023-11-23 20:11:08] | (0:00:13.989626)\nINFO:sam_altman:====> Epoch: 80 [2023-11-23 20:11:22] | (0:00:13.996693)\nINFO:sam_altman:Training is done. 
The program is closed.\nINFO:sam_altman:saving final ckpt:Success.\n/root/.pyenv/versions/3.9.18/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 20 leaked semaphore objects to clean up at shutdown\nwarnings.warn('resource_tracker: There appear to be %d '\nTraining completed. You can check the training log in the console or the 'train.log' file in the experiment directory.\nCreating directory...\nCopying files...\nCopying file: logs/sam_altman/added_IVF1079_Flat_nprobe_1_sam_altman_v2.index\nCopying file: logs/sam_altman/total_fea.npy\nCopying file: assets/weights/sam_altman.pth\nDefining the base directory...\nCreating a Zip file...\nAdding 'added_*.index' files to the Zip file...\nAdding file: /src/Model/sam_altman/added_IVF1079_Flat_nprobe_1_sam_altman_v2.index\nAdding 'total_*.npy' files to the Zip file...\nAdding file: /src/Model/sam_altman/total_fea.npy\nAdding specific file to the Zip file...\nAdding file: /src/Model/sam_altman/sam_altman.pth\nZip file path: /src/Model/sam_altman/sam_altman.zip",
"output": "https://replicate.delivery/pbxt/lN9zQPTvPBaWEVUyLmvclC3nT1CDrOBAFjzGj15MyrV7j1eIA/sam_altman.zip",
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2023-11-23T19:49:21.024972Z",
"started_at": "2023-11-23T19:51:22.421826Z",
"completed_at": "2023-11-23T20:11:33.252713Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/opl6jylbuhb54lskbcuaj7dxfi/cancel",
"get": "https://api.replicate.com/v1/predictions/opl6jylbuhb54lskbcuaj7dxfi"
},
"metrics": {
"predict_time": 1210.830887,
"total_time": 1332.227741
}
}
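The metrics agree with the timestamps: started_at minus created_at is the ~121 seconds the job spent queued and booting (total_time minus predict_time), and completed_at minus started_at matches predict_time. A quick verification sketch:

from datetime import datetime

def ts(s):
    # datetime.fromisoformat only accepts a trailing "Z" on Python 3.11+,
    # so swap it for an explicit UTC offset to stay portable.
    return datetime.fromisoformat(s.replace("Z", "+00:00"))

created = ts("2023-11-23T19:49:21.024972Z")
started = ts("2023-11-23T19:51:22.421826Z")
completed = ts("2023-11-23T20:11:33.252713Z")

print((started - created).total_seconds())    # 121.396854  (queue + setup)
print((completed - started).total_seconds())  # 1210.830887 == predict_time
print((completed - created).total_seconds())  # 1332.227741 == total_time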