so-vits-svc/configs_template/config_tiny_template.json

{
  "train": {
    "log_interval": 200,
    "eval_interval": 800,
    "seed": 1234,
    "epochs": 10000,
    "learning_rate": 0.0001,
    "betas": [
      0.8,
      0.99
    ],
    "eps": 1e-09,
    "batch_size": 6,
    "fp16_run": false,
    "half_type": "fp16",
    "lr_decay": 0.999875,
    "segment_size": 10240,
    "init_lr_ratio": 1,
    "warmup_epochs": 0,
    "c_mel": 45,
    "c_kl": 1.0,
    "use_sr": true,
    "max_speclen": 512,
    "port": "8001",
    "keep_ckpts": 3,
    "all_in_mem": false,
    "vol_aug": false
  },
  "data": {
    "training_files": "filelists/train.txt",
    "validation_files": "filelists/val.txt",
    "max_wav_value": 32768.0,
    "sampling_rate": 44100,
    "filter_length": 2048,
    "hop_length": 512,
    "win_length": 2048,
    "n_mel_channels": 80,
    "mel_fmin": 0.0,
    "mel_fmax": 22050,
    "unit_interpolate_mode": "nearest"
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 512,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0.1,
    "resblock": "1",
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    "upsample_rates": [8, 8, 2, 2, 2],
    "upsample_initial_channel": 400,
    "upsample_kernel_sizes": [16, 16, 4, 4, 4],
    "n_layers_q": 3,
    "n_layers_trans_flow": 3,
    "n_flow_layer": 4,
    "use_spectral_norm": false,
    "gin_channels": 768,
    "ssl_dim": 768,
    "n_speakers": 200,
    "vocoder_name": "nsf-hifigan",
    "speech_encoder": "vec768l12",
    "speaker_embedding": false,
    "vol_embedding": false,
    "use_depthwise_conv": true,
    "flow_share_parameter": true,
    "use_automatic_f0_prediction": true,
    "use_transformer_flow": false
  },
  "spk": {
    "nyaru": 0,
    "huiyu": 1,
    "nen": 2,
    "paimon": 3,
    "yunhao": 4
  }
}
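
Below is a minimal sketch, using only the Python standard library, of how a training script might read this template and access its nested sections (train, data, model, spk) as attributes. The path configs/config.json, the load_config helper, and the SimpleNamespace wrapper are illustrative assumptions, not so-vits-svc's own config loader.

```python
# Illustrative sketch only: not the so-vits-svc loader itself.
import json
from types import SimpleNamespace


def load_config(path: str) -> SimpleNamespace:
    """Read a JSON config and expose nested dicts as attribute namespaces."""
    with open(path, "r", encoding="utf-8") as f:
        raw = json.load(f)

    def to_ns(obj):
        # Recursively wrap dicts; lists and scalars pass through unchanged.
        if isinstance(obj, dict):
            return SimpleNamespace(**{k: to_ns(v) for k, v in obj.items()})
        return obj

    return to_ns(raw)


if __name__ == "__main__":
    cfg = load_config("configs/config.json")  # hypothetical location of this template
    print("batch size:", cfg.train.batch_size)       # 6
    print("sampling rate:", cfg.data.sampling_rate)  # 44100
    print("speaker map:", vars(cfg.spk))             # {'nyaru': 0, 'huiyu': 1, ...}
```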