so-vits-svc/vencoder/CNHubertLarge.py

36 lines
1.2 KiB
Python
Raw Permalink Normal View History

2023-06-01 18:15:42 +00:00
import torch
from fairseq import checkpoint_utils
2023-06-26 06:57:53 +00:00
from vencoder.encoder import SpeechEncoder
2023-06-21 18:04:03 +00:00
2023-06-01 18:15:42 +00:00
class CNHubertLarge(SpeechEncoder):
    """Speech encoder backed by a fairseq Chinese HuBERT-Large checkpoint.

    Produces 1024-dimensional content features; see :meth:`encoder` for the
    output layout.
    """

    def __init__(self, vec_path="pretrain/chinese-hubert-large-fairseq-ckpt.pt", device=None):
        """Load the fairseq checkpoint at *vec_path* and move it to *device*.

        Args:
            vec_path: Path to the fairseq ``.pt`` checkpoint file.
            device: Torch device spec; when ``None``, CUDA is used if
                available, otherwise CPU.
        """
        super().__init__()
        print(f"load model(s) from {vec_path}")
        self.hidden_dim = 1024
        # Only the model itself is used; the saved config and task objects
        # returned alongside it are deliberately ignored.
        models, _saved_cfg, _task = checkpoint_utils.load_model_ensemble_and_task(
            [vec_path],
            suffix="",
        )
        if device is None:
            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.dev = torch.device(device)
        self.model = models[0].to(self.dev)
        self.model.eval()

    def encoder(self, wav):
        """Extract content features from a waveform tensor.

        Args:
            wav: 1-D mono waveform tensor, or a 2-D tensor — assumed
                (samples, channels), which is averaged down to mono over the
                last axis (TODO confirm layout against callers).

        Returns:
            Feature tensor of shape (1, hidden_dim, frames).

        Raises:
            AssertionError: If the input is not 1-D after channel averaging.
        """
        feats = wav
        if feats.dim() == 2:  # double channels -> average to mono
            feats = feats.mean(-1)
        assert feats.dim() == 1, feats.dim()
        feats = feats.view(1, -1)
        # Nothing is padded, so the mask is all-False; torch.zeros with a bool
        # dtype replaces the legacy BoolTensor(...).fill_(False) idiom.
        padding_mask = torch.zeros(feats.shape, dtype=torch.bool)
        inputs = {
            "source": feats.to(wav.device),
            "padding_mask": padding_mask.to(wav.device)
        }
        with torch.no_grad():
            logits = self.model.extract_features(**inputs)
        # extract_features yields (batch, frames, hidden); transpose to
        # (batch, hidden, frames) as the rest of the pipeline expects.
        return logits[0].transpose(1, 2)