diff --git a/onnxexport/model_onnx_speaker_mix.py b/onnxexport/model_onnx_speaker_mix.py
index e6505c8..f055351 100644
--- a/onnxexport/model_onnx_speaker_mix.py
+++ b/onnxexport/model_onnx_speaker_mix.py
@@ -1,9 +1,14 @@
 import torch
 from torch import nn
+from torch.nn import Conv1d, Conv2d
 from torch.nn import functional as F
+from torch.nn.utils import spectral_norm, weight_norm
 
 import modules.attentions as attentions
+import modules.commons as commons
 import modules.modules as modules
+import utils
+from modules.commons import get_padding
 from utils import f0_to_coarse
 
 
@@ -15,7 +20,9 @@ class ResidualCouplingBlock(nn.Module):
                  dilation_rate,
                  n_layers,
                  n_flows=4,
-                 gin_channels=0):
+                 gin_channels=0,
+                 share_parameter=False
+                 ):
         super().__init__()
         self.channels = channels
         self.hidden_channels = hidden_channels
@@ -26,10 +33,13 @@ class ResidualCouplingBlock(nn.Module):
         self.gin_channels = gin_channels
 
         self.flows = nn.ModuleList()
+
+        self.wn = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=gin_channels) if share_parameter else None
+
         for i in range(n_flows):
             self.flows.append(
                 modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
-                                              gin_channels=gin_channels, mean_only=True))
+                                              gin_channels=gin_channels, mean_only=True, wn_sharing_parameter=self.wn))
             self.flows.append(modules.Flip())
 
     def forward(self, x, x_mask, g=None, reverse=False):
@@ -41,6 +51,79 @@ class ResidualCouplingBlock(nn.Module):
                 x = flow(x, x_mask, g=g, reverse=reverse)
         return x
 
+class TransformerCouplingBlock(nn.Module):
+    def __init__(self,
+                 channels,
+                 hidden_channels,
+                 filter_channels,
+                 n_heads,
+                 n_layers,
+                 kernel_size,
+                 p_dropout,
+                 n_flows=4,
+                 gin_channels=0,
+                 share_parameter=False
+                 ):
+
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.flows = nn.ModuleList()
+
+        self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None
+
+        for i in range(n_flows):
+            self.flows.append(
+                modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels))
+            self.flows.append(modules.Flip())
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        if not reverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, reverse=reverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow(x, x_mask, g=g, reverse=reverse)
+        return x
+
+
+class Encoder(nn.Module):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 hidden_channels,
+                 kernel_size,
+                 dilation_rate,
+                 n_layers,
+                 gin_channels=0):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+
+        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths, g=None):
+        # print(x.shape,x_lengths.shape)
+        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+        x = self.pre(x) * x_mask
+        x = self.enc(x, x_mask, g=g)
+        stats = self.proj(x) * x_mask
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+        return z, m, logs, x_mask
+
 
 class TextEncoder(nn.Module):
     def __init__(self,
@@ -149,6 +232,12 @@ class SynthesizerTrn(nn.Module):
                  sampling_rate=44100,
                  vol_embedding=False,
                  vocoder_name = "nsf-hifigan",
+                 use_depthwise_conv = False,
+                 use_automatic_f0_prediction = True,
+                 flow_share_parameter = False,
+                 n_flow_layer = 4,
+                 n_layers_trans_flow = 3,
+                 use_transformer_flow = False,
                  **kwargs):
 
         super().__init__()
@@ -171,6 +260,9 @@ class SynthesizerTrn(nn.Module):
         self.ssl_dim = ssl_dim
         self.vol_embedding = vol_embedding
         self.emb_g = nn.Embedding(n_speakers, gin_channels)
+        self.use_depthwise_conv = use_depthwise_conv
+        self.use_automatic_f0_prediction = use_automatic_f0_prediction
+        self.n_layers_trans_flow = n_layers_trans_flow
         if vol_embedding:
             self.emb_vol = nn.Linear(1, hidden_channels)
 
@@ -195,8 +287,11 @@ class SynthesizerTrn(nn.Module):
             "upsample_initial_channel": upsample_initial_channel,
             "upsample_kernel_sizes": upsample_kernel_sizes,
             "gin_channels": gin_channels,
+            "use_depthwise_conv":use_depthwise_conv
         }
 
+        modules.set_Conv1dModel(self.use_depthwise_conv)
+
         if vocoder_name == "nsf-hifigan":
             from vdecoder.hifigan.models import Generator
             self.dec = Generator(h=hps)
@@ -208,17 +303,22 @@ class SynthesizerTrn(nn.Module):
             from vdecoder.hifigan.models import Generator
             self.dec = Generator(h=hps)
 
-        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-        self.f0_decoder = F0Decoder(
-            1,
-            hidden_channels,
-            filter_channels,
-            n_heads,
-            n_layers,
-            kernel_size,
-            p_dropout,
-            spk_channels=gin_channels
-        )
+        self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+        if use_transformer_flow:
+            self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels, share_parameter=flow_share_parameter)
+        else:
+            self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels, share_parameter=flow_share_parameter)
+        if self.use_automatic_f0_prediction:
+            self.f0_decoder = F0Decoder(
+                1,
+                hidden_channels,
+                filter_channels,
+                n_heads,
+                n_layers,
+                kernel_size,
+                p_dropout,
+                spk_channels=gin_channels
+            )
         self.emb_uv = nn.Embedding(2, hidden_channels)
         self.predict_f0 = False
         self.speaker_map = []
@@ -251,9 +351,16 @@ class SynthesizerTrn(nn.Module):
         x_mask = torch.unsqueeze(torch.ones_like(f0), 1).to(c.dtype)
         # vol proj
+        vol = self.emb_vol(vol[:,:,None]).transpose(1,2) if vol is not None and self.vol_embedding else 0
-
+
         x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) + vol
+
+        if self.use_automatic_f0_prediction and self.predict_f0:
+            lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
+            norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
+            pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
+            f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)
         z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise)
         z = self.flow(z_p, c_mask, g=g, reverse=True)
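
Note (illustrative, not part of the patch): a minimal sketch of how the new constructor flags introduced above might be supplied when instantiating the export model from a config. The config path, the use of utils.get_hparams_from_file, and the derivation of the two positional arguments are assumptions drawn from typical so-vits-svc configs; adjust them to your own config.json.

    import utils
    from onnxexport.model_onnx_speaker_mix import SynthesizerTrn

    hps = utils.get_hparams_from_file("configs/config.json")  # assumed path
    model = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,                # spec_channels (assumed derivation)
        hps.train.segment_size // hps.data.hop_length,  # segment_size (assumed derivation)
        **hps.model,  # may now carry use_transformer_flow, flow_share_parameter,
                      # use_depthwise_conv, use_automatic_f0_prediction, n_flow_layer, ...
    )
    model.eval()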