From 31bc93cf2b646e3036138f5783a531231bbc34fa Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Tue, 11 Apr 2023 23:41:21 +0800
Subject: [PATCH 1/4] Debug

---
 inference/infer_tool.py | 5 +++++
 webUI.py                | 8 +++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/inference/infer_tool.py b/inference/infer_tool.py
index 5328c54..10c481d 100644
--- a/inference/infer_tool.py
+++ b/inference/infer_tool.py
@@ -221,6 +221,11 @@ class Svc(object):
         # 清理显存
         torch.cuda.empty_cache()
 
+    def unload_model(self):
+        # 卸载模型
+        del self.net_g_ms
+        if self.enhancer!=None: del self.enhancer
+
     def slice_inference(self,
                         raw_audio_path,
                         spk,
diff --git a/webUI.py b/webUI.py
index fb81089..7d5054d 100644
--- a/webUI.py
+++ b/webUI.py
@@ -28,15 +28,16 @@ model = None
 spk = None
 debug = False
 
-cuda = []
+cuda = {}
 if torch.cuda.is_available():
     for i in range(torch.cuda.device_count()):
         device_name = torch.cuda.get_device_properties(i).name
-        cuda.append(f"CUDA:{i} {device_name}")
+        cuda[f"CUDA:{i} {device_name}"] = f"cuda:{i}"
 
 def modelAnalysis(model_path,config_path,cluster_model_path,device,enhance):
     global model
     try:
+        device = cuda[device] if "CUDA" in device else device
         model = Svc(model_path.name, config_path.name, device=device if device!="Auto" else None, cluster_model_path = cluster_model_path.name if cluster_model_path != None else "",nsf_hifigan_enhance=enhance)
         spks = list(model.spk2id.keys())
         device_name = torch.cuda.get_device_properties(model.dev).name if "cuda" in str(model.dev) else str(model.dev)
@@ -58,6 +59,7 @@ def modelUnload():
     if model is None:
         return sid.update(choices = [],value=""),"没有模型需要卸载!"
     else:
+        model.unload_model()
        model = None
        torch.cuda.empty_cache()
        return sid.update(choices = [],value=""),"模型卸载完毕!"
@@ -161,7 +163,7 @@ with gr.Blocks(
                     model_path = gr.File(label="选择模型文件")
                     config_path = gr.File(label="选择配置文件")
                     cluster_model_path = gr.File(label="选择聚类模型文件(没有可以不选)")
-                    device = gr.Dropdown(label="推理设备,默认为自动选择CPU和GPU", choices=["Auto",*cuda,"CPU"], value="Auto")
+                    device = gr.Dropdown(label="推理设备,默认为自动选择CPU和GPU", choices=["Auto",*cuda.keys(),"CPU"], value="Auto")
                     enhance = gr.Checkbox(label="是否使用NSF_HIFIGAN增强,该选项对部分训练集少的模型有一定的音质增强效果,但是对训练好的模型有反面效果,默认关闭", value=False)
                 with gr.Column():
                     gr.Markdown(value="""

From f2b4515d72a00c9f89a52e58ead1e6f798351020 Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Wed, 12 Apr 2023 00:05:24 +0800
Subject: [PATCH 2/4] Debug

---
 inference/infer_tool.py |  2 +-
 webUI.py                | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/inference/infer_tool.py b/inference/infer_tool.py
index 10c481d..6a2f4df 100644
--- a/inference/infer_tool.py
+++ b/inference/infer_tool.py
@@ -224,7 +224,7 @@ class Svc(object):
     def unload_model(self):
         # 卸载模型
         del self.net_g_ms
-        if self.enhancer!=None: del self.enhancer
+        if hasattr(self,"enhancer"): del self.enhancer
 
     def slice_inference(self,
                         raw_audio_path,
diff --git a/webUI.py b/webUI.py
index 7d5054d..ad9042f 100644
--- a/webUI.py
+++ b/webUI.py
@@ -17,6 +17,7 @@ from scipy.io import wavfile
 import librosa
 import torch
 import time
+import traceback
 
 logging.getLogger('numba').setLevel(logging.WARNING)
 logging.getLogger('markdown_it').setLevel(logging.WARNING)
@@ -51,6 +52,7 @@ def modelAnalysis(model_path,config_path,cluster_model_path,device,enhance):
             msg += i + " "
         return sid.update(choices = spks,value=spks[0]), msg
     except Exception as e:
+        if debug: traceback.print_exc()
         raise gr.Error(e)
 
 
@@ -90,8 +92,10 @@ def vc_fn(sid, input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, noise
             soundfile.write(output_file, _audio, model.target_sample, format="wav")
             return f"推理成功,音频文件保存为results/{filename}", (model.target_sample, _audio)
         except Exception as e:
+            if debug: traceback.print_exc()
             raise gr.Error(e)
     except Exception as e:
+        if debug: traceback.print_exc()
         raise gr.Error(e)
 
 
@@ -142,6 +146,8 @@ def vc_fn2(sid, input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, nois
         os.remove(save_path2)
     return a,b
 
+def debug_change():
+    debug = debug_button.value
 
 with gr.Blocks(
     theme=gr.themes.Base(
@@ -205,9 +211,15 @@ with gr.Blocks(
                     vc_output1 = gr.Textbox(label="Output Message")
                 with gr.Column():
                     vc_output2 = gr.Audio(label="Output Audio", interactive=False)
-
+        with gr.Row(variant="panel"):
+            with gr.Column():
+                gr.Markdown(value="""
+                    WebUI设置
+                    """)
+                debug_button = gr.Checkbox(label="Debug模式,如果向社区反馈BUG需要打开,打开后控制台可以显示具体错误提示", value=debug)
         vc_submit.click(vc_fn, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,F0_mean_pooling,enhancer_adaptive_key], [vc_output1, vc_output2])
         vc_submit2.click(vc_fn2, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,text2tts,tts_rate,F0_mean_pooling,enhancer_adaptive_key], [vc_output1, vc_output2])
+        debug_button.change(debug_change,[],[])
         model_load_button.click(modelAnalysis,[model_path,config_path,cluster_model_path,device,enhance],[sid,sid_output])
         model_unload_button.click(modelUnload,[],[sid,sid_output])
     app.launch()

From 6d3d4ea06e6718ae209bad757aa4821b72f84170 Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Wed, 12 Apr 2023 00:11:02 +0800
Subject: [PATCH 3/4] Debug

---
 webUI.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/webUI.py b/webUI.py
index ad9042f..ba2b361 100644
--- a/webUI.py
+++ b/webUI.py
@@ -147,6 +147,7 @@ def vc_fn2(sid, input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, nois
     return a,b
 
 def debug_change():
+    global debug
     debug = debug_button.value
 
 with gr.Blocks(

From 8aa849e270f6ccfb16a13894ecbf9ae09a7bf8af Mon Sep 17 00:00:00 2001
From: ylzz1997
Date: Wed, 12 Apr 2023 00:44:16 +0800
Subject: [PATCH 4/4] Debug

---
 inference/infer_tool.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/inference/infer_tool.py b/inference/infer_tool.py
index 6a2f4df..94b3ca6 100644
--- a/inference/infer_tool.py
+++ b/inference/infer_tool.py
@@ -6,6 +6,7 @@ import os
 import time
 from pathlib import Path
 from inference import slicer
+import gc
 
 import librosa
 import numpy as np
@@ -223,8 +224,13 @@ class Svc(object):
 
     def unload_model(self):
         # 卸载模型
+        self.net_g_ms = self.net_g_ms.to("cpu")
         del self.net_g_ms
-        if hasattr(self,"enhancer"): del self.enhancer
+        if hasattr(self,"enhancer"):
+            self.enhancer.enhancer = self.enhancer.enhancer.to("cpu")
+            del self.enhancer.enhancer
+            del self.enhancer
+        gc.collect()
 
     def slice_inference(self,
                         raw_audio_path,