import copy
import math

import torch
from torch import nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn import functional as F
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

import modules.attentions as attentions
import modules.commons as commons
import modules.modules as modules
import utils
from modules.commons import init_weights, get_padding
from utils import f0_to_coarse


class ResidualCouplingBlock(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        # Each flow step is an affine coupling layer followed by a channel
        # flip, as in VITS.
        self.flows = nn.ModuleList()
        for _ in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
                                              gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        # The coupling layers are invertible: apply them in order for the
        # forward direction and in reverse order for the inverse direction.
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x
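
# A minimal shape/invertibility sketch (hypothetical sizes; assumes
# modules.ResidualCouplingLayer follows the usual VITS coupling signature):
#
#   flow = ResidualCouplingBlock(channels=192, hidden_channels=192,
#                                kernel_size=5, dilation_rate=1, n_layers=4)
#   x = torch.randn(2, 192, 100)           # [B, channels, T]
#   x_mask = torch.ones(2, 1, 100)         # [B, 1, T]
#   z = flow(x, x_mask)                    # forward direction
#   x_hat = flow(z, x_mask, reverse=True)  # should recover x up to numerics

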
class TextEncoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 n_layers,
                 gin_channels=0,
                 filter_channels=None,
                 n_heads=None,
                 p_dropout=None):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
        self.f0_emb = nn.Embedding(256, hidden_channels)

        self.enc_ = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)

    def forward(self, x, x_mask, f0=None, z=None):
        # Add the coarse-F0 embedding, encode with self-attention, then
        # project to the mean and log-std of the latent prior.
        x = x + self.f0_emb(f0).transpose(1, 2)
        x = self.enc_(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        # Reparameterize with externally supplied noise z: z = m + z * exp(logs).
        z = (m + z * torch.exp(logs)) * x_mask

        return z, m, logs, x_mask
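
# Sampling note: the line z = (m + z * exp(logs)) * x_mask is the
# reparameterization trick, i.e. a draw from N(m, exp(logs)^2) using external
# standard-normal noise z. A hedged shape sketch (hypothetical sizes):
#
#   enc = TextEncoder(192, 192, kernel_size=3, n_layers=6,
#                     filter_channels=768, n_heads=2, p_dropout=0.1)
#   x = torch.randn(1, 192, 100)                 # [B, H, T]
#   x_mask = torch.ones(1, 1, 100)
#   f0_coarse = torch.randint(0, 256, (1, 100))  # coarse F0 bucket ids
#   noise = torch.randn(1, 192, 100)             # z ~ N(0, I)
#   z, m, logs, _ = enc(x, x_mask, f0=f0_coarse, z=noise)

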
class F0Decoder(nn.Module):
    def __init__(self,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 spk_channels=0):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels

        self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1)
        self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, norm_f0, x_mask, spk_emb=None):
        # Detach so F0 prediction does not backpropagate into the encoder.
        x = x.detach()
        if spk_emb is not None:
            x = x + self.cond(spk_emb)
        x = x + self.f0_prenet(norm_f0)
        x = self.prenet(x) * x_mask
        x = self.decoder(x * x_mask, x_mask)
        x = self.proj(x) * x_mask
        return x
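
# Hedged usage sketch (hypothetical sizes): predict a 1-channel F0 track from
# content features, a normalized-F0 prior, and an optional speaker embedding.
#
#   f0_dec = F0Decoder(1, 192, 768, n_heads=2, n_layers=3,
#                      kernel_size=3, p_dropout=0.1, spk_channels=256)
#   x = torch.randn(1, 192, 100)        # [B, H, T] content features
#   norm_f0 = torch.randn(1, 1, 100)    # [B, 1, T] normalized F0 prior
#   x_mask = torch.ones(1, 1, 100)
#   spk = torch.randn(1, 256, 1)        # broadcast over T after 1x1 conv
#   pred_f0 = f0_dec(x, norm_f0, x_mask, spk_emb=spk)  # [B, 1, T]

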
class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(self,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 gin_channels,
                 ssl_dim,
                 n_speakers,
                 sampling_rate=44100,
                 vol_embedding=False,
                 vocoder_name="nsf-hifigan",
                 **kwargs):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.ssl_dim = ssl_dim
        self.vol_embedding = vol_embedding
        self.emb_g = nn.Embedding(n_speakers, gin_channels)
        if vol_embedding:
            self.emb_vol = nn.Linear(1, hidden_channels)

        self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)

        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels=filter_channels,
            n_heads=n_heads,
            n_layers=n_layers,
            kernel_size=kernel_size,
            p_dropout=p_dropout
        )
        hps = {
            "sampling_rate": sampling_rate,
            "inter_channels": inter_channels,
            "resblock": resblock,
            "resblock_kernel_sizes": resblock_kernel_sizes,
            "resblock_dilation_sizes": resblock_dilation_sizes,
            "upsample_rates": upsample_rates,
            "upsample_initial_channel": upsample_initial_channel,
            "upsample_kernel_sizes": upsample_kernel_sizes,
            "gin_channels": gin_channels,
        }

        # Select the vocoder backend; fall back to nsf-hifigan for unknown names.
        if vocoder_name == "nsf-hifigan":
            from vdecoder.hifigan.models import Generator
            self.dec = Generator(h=hps)
        elif vocoder_name == "nsf-snake-hifigan":
            from vdecoder.hifiganwithsnake.models import Generator
            self.dec = Generator(h=hps)
        else:
            print("[?] Unknown vocoder: falling back to default (nsf-hifigan)")
            from vdecoder.hifigan.models import Generator
            self.dec = Generator(h=hps)

        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
        self.f0_decoder = F0Decoder(
            1,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            spk_channels=gin_channels
        )
        self.emb_uv = nn.Embedding(2, hidden_channels)
        self.predict_f0 = False
        self.speaker_map = []
        self.export_mix = False
    def export_chara_mix(self, speakers_mix):
        # Precompute speaker embeddings for exported speaker mixing: one
        # [1, 1, H] embedding per speaker, stacked into [1, S, 1, 1, H].
        self.speaker_map = torch.zeros((len(speakers_mix), 1, 1, self.gin_channels))
        for i, spkidx in enumerate(speakers_mix.values()):
            self.speaker_map[i] = self.emb_g(torch.LongTensor([[spkidx]]))
        self.speaker_map = self.speaker_map.unsqueeze(0)
        self.export_mix = True
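
    # Mixing sketch (a hedged example; names and weights are hypothetical):
    # after export_chara_mix, self.speaker_map has shape [1, S, 1, 1, H] and
    # forward() expects g of shape [N, S] whose rows are per-frame mixing
    # weights over the S exported speakers, e.g.
    #
    #   model.export_chara_mix({"speaker_a": 0, "speaker_b": 1})
    #   g = torch.tensor([[0.7, 0.3]] * n_frames)  # 70/30 blend each frame
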
    def forward(self, c, f0, mel2ph, uv, noise=None, g=None, vol=None):
        # Expand the content features c to mel-frame resolution via the
        # mel2ph alignment (index 0 addresses the zero-padding frame).
        decoder_inp = F.pad(c, [0, 0, 1, 0])
        mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]])
        c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2)  # gather: [B, T, H] -> [B, H, T]

        if self.export_mix:  # g: [N, S] per-frame speaker mixing weights
            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, 1, 1, 1]
            g = g * self.speaker_map  # [N, S, 1, 1, H]
            g = torch.sum(g, dim=1)  # [N, 1, 1, H]
            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
        else:
            if g.dim() == 1:
                g = g.unsqueeze(0)
            g = self.emb_g(g).transpose(1, 2)

        x_mask = torch.unsqueeze(torch.ones_like(f0), 1).to(c.dtype)
        # Volume projection (zero when disabled or when no volume is given).
        vol = self.emb_vol(vol[:, :, None]).transpose(1, 2) if vol is not None and self.vol_embedding else 0

        x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) + vol

        z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise)
        z = self.flow(z_p, c_mask, g=g, reverse=True)
        o = self.dec(z * c_mask, g=g, f0=f0)
        return o
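
# End-to-end inference sketch (hedged; the hyperparameters below are typical
# 44.1 kHz NSF-HiFiGAN values, assumed rather than taken from a real config):
#
#   model = SynthesizerTrn(spec_channels=1025, segment_size=10240,
#                          inter_channels=192, hidden_channels=192,
#                          filter_channels=768, n_heads=2, n_layers=6,
#                          kernel_size=3, p_dropout=0.1, resblock="1",
#                          resblock_kernel_sizes=[3, 7, 11],
#                          resblock_dilation_sizes=[[1, 3, 5]] * 3,
#                          upsample_rates=[8, 8, 2, 2, 2],
#                          upsample_initial_channel=512,
#                          upsample_kernel_sizes=[16, 16, 4, 4, 4],
#                          gin_channels=256, ssl_dim=256, n_speakers=200)
#   c = torch.randn(1, 100, 256)            # [B, T_unit, ssl_dim] content units
#   f0 = torch.rand(1, 100) * 200 + 100     # [B, T] F0 in Hz
#   mel2ph = torch.arange(1, 101)[None]     # [B, T] frame-to-unit alignment
#   uv = torch.ones(1, 100)                 # voiced/unvoiced flags
#   noise = torch.randn(1, 192, 100)        # prior noise fed to enc_p
#   g = torch.LongTensor([0])               # speaker id
#   audio = model(c, f0, mel2ph, uv, noise=noise, g=g)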