From 2f8c63b8758ab9bd696549d8c9f38052f206c1fe Mon Sep 17 00:00:00 2001
From: YuriHead
Date: Wed, 5 Jul 2023 23:28:56 +0800
Subject: [PATCH] ruff fix

---
 edgetts/tts.py | 8 ++++----
 webUI.py       | 7 ++-----
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/edgetts/tts.py b/edgetts/tts.py
index 252ad28..c850cbc 100644
--- a/edgetts/tts.py
+++ b/edgetts/tts.py
@@ -1,10 +1,10 @@
 import asyncio
 import random
+import sys
+
 import edge_tts
 from edge_tts import VoicesManager
-import sys
-from langdetect import detect
-from langdetect import DetectorFactory
+from langdetect import DetectorFactory, detect
 
 DetectorFactory.seed = 0
 
@@ -20,7 +20,7 @@ print(f"Text: {TEXT}, Language: {LANG}, Gender: {GENDER}, Rate: {RATE}, Volume:
 
 async def _main() -> None:
     voices = await VoicesManager.create()
-    if not GENDER is None:
+    if GENDER is not None:
         # From "zh-cn" to "zh-CN" etc.
         if LANG == "zh-cn" or LANG == "zh-tw":
             LOCALE = LANG[:-2] + LANG[-2:].upper()
diff --git a/webUI.py b/webUI.py
index 71bbfe4..b8bc6df 100644
--- a/webUI.py
+++ b/webUI.py
@@ -5,23 +5,20 @@ import re
 import subprocess
 import time
 import traceback
-
 from itertools import chain
 from pathlib import Path
 
 # os.system("wget -P cvec/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt")
 import gradio as gr
-import gradio.processing_utils as gr_pu
 import librosa
 import numpy as np
 import soundfile
 import torch
-from scipy.io import wavfile
 
 from compress_model import removeOptimizer
+from edgetts.tts_voices import SUPPORTED_LANGUAGES
 from inference.infer_tool import Svc
 from utils import mix_model
-from edgetts.tts_voices import SUPPORTED_LANGUAGES
 
 logging.getLogger('numba').setLevel(logging.WARNING)
 logging.getLogger('markdown_it').setLevel(logging.WARNING)
@@ -228,7 +225,7 @@ def vc_fn2(_text, _lang, _gender, _rate, _volume, sid, output_format, vc_transfo
             os.remove("tts.wav")
             return "Success", output_file_path
         except Exception as e:
-            if debug: traceback.print_exc()
+            if debug: traceback.print_exc()  # noqa: E701
             raise gr.Error(e)
 
 def model_compression(_model):