From 8b11d46bc8ccc6d753bb546036cd02fe81646f55 Mon Sep 17 00:00:00 2001
From: Ναρουσέ·μ·γιουμεμί·Χινακάννα <40709280+NaruseMioShirakana@users.noreply.github.com>
Date: Tue, 23 May 2023 00:02:00 +0800
Subject: [PATCH] Add files via upload

---
 vencoder/ContentVec768L9_Onnx.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/vencoder/ContentVec768L9_Onnx.py b/vencoder/ContentVec768L9_Onnx.py
index 8b13789..6cc24e0 100644
--- a/vencoder/ContentVec768L9_Onnx.py
+++ b/vencoder/ContentVec768L9_Onnx.py
@@ -1 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
 
+class ContentVec768L9_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/vec-768-layer-9.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 768
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2)
\ No newline at end of file
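
For reference, a minimal usage sketch (separate from the patch itself), assuming the repo's vencoder.encoder.SpeechEncoder base class, the default ONNX model at pretrain/vec-768-layer-9.onnx, and a 16 kHz mono recording held on the CPU; the file name example.wav and the use of librosa are illustrative assumptions, not part of this commit.

    import librosa
    import torch

    from vencoder.ContentVec768L9_Onnx import ContentVec768L9_Onnx

    # Load a waveform as a 1-D float32 tensor; ContentVec-style encoders are
    # normally fed 16 kHz mono audio (an assumption, not set by this patch).
    wav, _ = librosa.load("example.wav", sr=16000, mono=True)
    wav = torch.from_numpy(wav)

    # The default constructor selects the CPU execution provider; passing
    # device="cuda" requests the CUDA provider with a CPU fallback.
    encoder = ContentVec768L9_Onnx()

    # encoder() returns a torch.Tensor shaped (1, hidden_dim=768, n_frames),
    # since the raw ONNX output (1, n_frames, 768) is transposed on return.
    feats = encoder.encoder(wav)
    print(feats.shape)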