From 0f0739ef14cfbdad3416ed530e40303d1882ff59 Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Mon, 20 Mar 2023 14:08:55 +0800
Subject: [PATCH 1/3] Update voice auto-split

---
 inference/infer_tool.py |  4 ++++
 inference_main.py       | 31 ++++++++++++++++++++++++++-----
 2 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/inference/infer_tool.py b/inference/infer_tool.py
index 3a2635b..6440fb0 100644
--- a/inference/infer_tool.py
+++ b/inference/infer_tool.py
@@ -102,6 +102,10 @@ def pad_array(arr, target_length):
     pad_right = pad_width - pad_left
     padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0))
     return padded_arr
+
+def split_list_by_n(list_collection, n, pre=0):
+    for i in range(0, len(list_collection), n):
+        yield list_collection[i-pre if i-pre>=0 else i: i + n]
 
 
 class Svc(object):
diff --git a/inference_main.py b/inference_main.py
index f869369..bde6a2c 100644
--- a/inference_main.py
+++ b/inference_main.py
@@ -25,6 +25,7 @@ def main():
     # Required settings
     parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='path to the model')
     parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='path to the config file')
+    parser.add_argument('-cl', '--clip', type=float, default=0, help='audio auto-slicing; 0 disables slicing; unit: seconds')
     parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='list of wav file names, placed under the raw folder')
     parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='pitch shift, positive and negative values supported (semitones)')
     parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='target speaker name(s) for synthesis')
@@ -34,6 +35,7 @@ def main():
                         help='automatically predict pitch during voice conversion; do not enable this when converting singing voice, or the pitch will go badly off')
     parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='path to the cluster model; can be left as-is if no cluster model was trained')
     parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='ratio of the cluster scheme, range 0-1; set to 0 if no cluster model was trained')
+    parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='crossfade length at both ends of the audio slices; adjust this value if the vocals sound discontinuous after auto-slicing, otherwise keep the default 0; unit: seconds')
 
     # Settings that normally need no change
     parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='default -40; noisy audio can use -30, dry vocals with breaths kept can use -50')
@@ -55,6 +57,8 @@ def main():
     cluster_infer_ratio = args.cluster_infer_ratio
     noice_scale = args.noice_scale
     pad_seconds = args.pad_seconds
+    clip = args.clip
+    lg = args.linear_gradient
 
     infer_tool.fill_a_to_b(trans, clean_names)
     for clean_name, tran in zip(clean_names, trans):
@@ -65,22 +69,32 @@ def main():
         wav_path = Path(raw_audio_path).with_suffix('.wav')
         chunks = slicer.cut(wav_path, db_thresh=slice_db)
         audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
+        per_size = int(clip*audio_sr)
+        lg_size = int(lg*audio_sr)
+        lg = np.linspace(0,1,lg_size) if lg_size!=0 else 0
 
         for spk in spk_list:
             audio = []
             for (slice_tag, data) in audio_data:
                 print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
 
-                length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
                 if slice_tag:
                     print('jump empty segment')
                     _audio = np.zeros(length)
+                    audio.extend(list(infer_tool.pad_array(_audio, length)))
+                    continue
+                if per_size != 0:
+                    datas = infer_tool.split_list_by_n(data, per_size,lg_size)
                 else:
+                    datas = [data]
+                for k,dat in enumerate(datas):
+                    per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length
+                    if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
                     # padd
                     pad_len = int(audio_sr * pad_seconds)
-                    data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
+                    dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
                     raw_path = io.BytesIO()
-                    soundfile.write(raw_path, data, audio_sr, format="wav")
+                    soundfile.write(raw_path, dat, audio_sr, format="wav")
                     raw_path.seek(0)
                     out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
                                                         cluster_infer_ratio=cluster_infer_ratio,
@@ -90,8 +104,15 @@ def main():
                     _audio = out_audio.cpu().numpy()
                     pad_len = int(svc_model.target_sample * pad_seconds)
                     _audio = _audio[pad_len:-pad_len]
-
-                audio.extend(list(infer_tool.pad_array(_audio, length)))
+                    _audio = infer_tool.pad_array(_audio, per_length)
+                    if lg_size!=0 and k!=0:
+                        lg1 = audio[-lg_size:]
+                        lg2 = _audio[0:lg_size]
+                        lg_pre = lg1*(1-lg)+lg2*lg
+                        audio = audio[0:-lg_size]
+                        audio.extend(lg_pre)
+                        _audio = _audio[lg_size:]
+                    audio.extend(list(_audio))
             key = "auto" if auto_predict_f0 else f"{tran}key"
             cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
             res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
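Reviewer note on PATCH 1/3: split_list_by_n yields windows of n samples, and every window after the first reaches back `pre` samples into the previous window, so adjacent chunks overlap by `pre`. A standalone sketch of that behavior (the function body is copied from the hunk; the toy input is illustrative, not from the repo):

    def split_list_by_n(list_collection, n, pre=0):
        # Yield [i-pre : i+n] windows: each window after the first
        # starts `pre` items early, so neighbours share `pre` items.
        for i in range(0, len(list_collection), n):
            yield list_collection[i-pre if i-pre>=0 else i: i + n]

    print([c for c in split_list_by_n(list(range(10)), 4, pre=2)])
    # -> [[0, 1, 2, 3], [2, 3, 4, 5, 6, 7], [6, 7, 8, 9]]

In inference_main.py this overlap is what feeds the crossfade: each slice of per_size samples carries lg_size extra samples from its predecessor.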
From 63ded35c2afe6f2ed7c6186512d73e881d089a93 Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Mon, 20 Mar 2023 14:09:41 +0800
Subject: [PATCH 2/3] Update voice auto-split

---
 inference_main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/inference_main.py b/inference_main.py
index bde6a2c..809ae31 100644
--- a/inference_main.py
+++ b/inference_main.py
@@ -35,7 +35,7 @@ def main():
                         help='automatically predict pitch during voice conversion; do not enable this when converting singing voice, or the pitch will go badly off')
     parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='path to the cluster model; can be left as-is if no cluster model was trained')
     parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='ratio of the cluster scheme, range 0-1; set to 0 if no cluster model was trained')
-    parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='crossfade length at both ends of the audio slices; adjust this value if the vocals sound discontinuous after auto-slicing, otherwise keep the default 0; unit: seconds')
+    parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='crossfade length between two adjacent audio slices; adjust this value if the vocals sound discontinuous after auto-slicing, otherwise keep the default 0; unit: seconds')
 
     # Settings that normally need no change
     parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='default -40; noisy audio can use -30, dry vocals with breaths kept can use -50')
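Reviewer note on PATCH 2/3: only the -lg help text changes ("both ends" becomes "two adjacent slices"); behavior is identical. The crossfade itself, wired up in PATCH 1/3, is a plain linear ramp: lg_pre = lg1*(1-lg)+lg2*lg. A self-contained numpy sketch of how the overlap region is blended (the signal values are synthetic, not from the repo):

    import numpy as np

    audio_sr = 44100
    lg_size = int(1.0 * audio_sr)    # 1 s of overlap, like -lg 1
    lg = np.linspace(0, 1, lg_size)  # the ramp built in inference_main.py

    tail = np.full(lg_size, 0.5)     # synthetic end of the previous slice
    head = np.full(lg_size, 1.0)     # synthetic start of the next slice

    # Fade the old slice out while the new one fades in, as in the patch.
    lg_pre = tail * (1 - lg) + head * lg
    assert abs(lg_pre[0] - 0.5) < 1e-9 and abs(lg_pre[-1] - 1.0) < 1e-9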
From 80a9cbe7e299c68c5360add582c223b131ef824e Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Mon, 20 Mar 2023 16:26:40 +0800
Subject: [PATCH 3/3] Update voice auto-split

---
 inference_main.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/inference_main.py b/inference_main.py
index 809ae31..21e2ef8 100644
--- a/inference_main.py
+++ b/inference_main.py
@@ -43,6 +43,7 @@ def main():
     parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='noise scale; affects articulation and audio quality; somewhat arcane')
     parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='seconds of padding for the inference audio; for unknown reasons artifacts appear at the start and end, and they disappear after padding with a short silence')
     parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='audio output format')
+    parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='after auto-slicing, the head and tail of each slice need to be discarded; this sets the proportion of the crossfade length to keep, range 0-1, left-open right-closed interval')
 
     args = parser.parse_args()
 
@@ -59,6 +60,7 @@ def main():
     pad_seconds = args.pad_seconds
     clip = args.clip
     lg = args.linear_gradient
+    lgr = args.linear_gradient_retain
 
     infer_tool.fill_a_to_b(trans, clean_names)
     for clean_name, tran in zip(clean_names, trans):
@@ -71,12 +73,16 @@ def main():
         audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
         per_size = int(clip*audio_sr)
         lg_size = int(lg*audio_sr)
-        lg = np.linspace(0,1,lg_size) if lg_size!=0 else 0
+        lg_size_r = int(lg_size*lgr)
+        lg_size_c_l = (lg_size-lg_size_r)//2
+        lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
+        lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
 
         for spk in spk_list:
             audio = []
             for (slice_tag, data) in audio_data:
                 print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
 
+                length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
                 if slice_tag:
                     print('jump empty segment')
@@ -106,12 +112,12 @@ def main():
                     _audio = _audio[pad_len:-pad_len]
                     _audio = infer_tool.pad_array(_audio, per_length)
                     if lg_size!=0 and k!=0:
-                        lg1 = audio[-lg_size:]
-                        lg2 = _audio[0:lg_size]
+                        lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:]
+                        lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size]
                         lg_pre = lg1*(1-lg)+lg2*lg
-                        audio = audio[0:-lg_size]
+                        audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size]
                         audio.extend(lg_pre)
-                        _audio = _audio[lg_size:]
+                        _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:]
                     audio.extend(list(_audio))
             key = "auto" if auto_predict_f0 else f"{tran}key"
             cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
             res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
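Reviewer note on PATCH 3/3: besides restoring the `length` assignment dropped in PATCH 1/3, this adds --linear_gradient_retain (lgr): only the middle lgr fraction of the overlap is crossfaded, and the remainder is trimmed symmetrically from the two slices. A numeric trace of the sizes the patch computes (flag values are illustrative):

    audio_sr = 44100
    lg, lgr = 1.0, 0.75                               # -lg 1 -lgr 0.75

    lg_size = int(lg * audio_sr)                      # 44100-sample overlap
    lg_size_r = int(lg_size * lgr)                    # 33075 samples crossfaded
    lg_size_c_l = (lg_size - lg_size_r) // 2          # 5512 samples cut on the left
    lg_size_c_r = lg_size - lg_size_r - lg_size_c_l   # 5513 samples cut on the right
    assert lg_size_c_l + lg_size_r + lg_size_c_r == lg_size

With the series applied, a typical run might look like: python inference_main.py -n "君の知らない物語-src.wav" -t 0 -s nen -cl 30 -lg 1 -lgr 0.75 (clip and crossfade values here are illustrative; the file name and speaker are the repo defaults).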