import math
from math import sqrt

import torch
import torch.nn as nn
import torch.nn.functional as F

from modules.commons.common_layers import Mish
from utils.hparams import hparams

Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self

    def override(self, attrs):
        if isinstance(attrs, dict):
            self.__dict__.update(**attrs)
        elif isinstance(attrs, (list, tuple, set)):
            for attr in attrs:
                self.override(attr)
        elif attrs is not None:
            raise NotImplementedError
        return self
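
# Illustrative usage (values assumed): AttrDict gives attribute-style access to
# plain dict entries, and override() merges in new values from a dict or a
# collection of dicts.
#   cfg = AttrDict(residual_channels=256)
#   cfg.residual_channels                      # 256
#   cfg.override({'residual_channels': 384})   # cfg.residual_channels -> 384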


class SinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding of the diffusion step (Transformer-style positional encoding)."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = x[:, None] * emb[None, :]
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb
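
# Illustrative example: with dim=256,
#   SinusoidalPosEmb(256)(torch.tensor([0., 10., 500.]))
# yields a [3, 256] tensor whose first 128 columns are sines and last 128 are
# cosines of the step index, at geometrically spaced frequencies.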


def Conv1d(*args, **kwargs):
    layer = nn.Conv1d(*args, **kwargs)
    nn.init.kaiming_normal_(layer.weight)
    return layer


@torch.jit.script
def silu(x):
    return x * torch.sigmoid(x)
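
# Note: Conv1d above is a thin wrapper around nn.Conv1d that re-initialises the
# weight with Kaiming normal; silu is decorated with @torch.jit.script so the
# x * sigmoid(x) expression is compiled by TorchScript.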


class ResidualBlock(nn.Module):
    def __init__(self, encoder_hidden, residual_channels, dilation):
        super().__init__()
        self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
        self.diffusion_projection = Linear(residual_channels, residual_channels)
        self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)

    def forward(self, x, conditioner, diffusion_step):
        diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
        conditioner = self.conditioner_projection(conditioner)
        y = x + diffusion_step

        y = self.dilated_conv(y) + conditioner

        gate, filter = torch.chunk(y, 2, dim=1)
        # Using torch.split instead of torch.chunk to avoid using onnx::Slice
        # gate, filter = torch.split(y, torch.div(y.shape[1], 2), dim=1)

        y = torch.sigmoid(gate) * torch.tanh(filter)

        y = self.output_projection(y)
        residual, skip = torch.chunk(y, 2, dim=1)
        # Using torch.split instead of torch.chunk to avoid using onnx::Slice
        # residual, skip = torch.split(y, torch.div(y.shape[1], 2), dim=1)

        return (x + residual) / sqrt(2.0), skip
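
# Shape sketch for a single ResidualBlock (illustrative values, assumed sizes):
#   block = ResidualBlock(encoder_hidden=256, residual_channels=256, dilation=1)
#   x = torch.randn(2, 256, 100)      # [B, C, T] running feature map
#   cond = torch.randn(2, 256, 100)   # [B, encoder_hidden, T] encoder output
#   step = torch.randn(2, 256)        # [B, C] embedded diffusion step
#   x, skip = block(x, cond, step)    # both [B, C, T]
# The dilated conv doubles the channels for WaveNet-style tanh/sigmoid gating,
# and the residual output is scaled by 1/sqrt(2) to keep its variance roughly stable.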


class DiffNet(nn.Module):
    def __init__(self, in_dims=80):
        super().__init__()
        self.params = params = AttrDict(
            # Model params
            encoder_hidden=hparams['hidden_size'],
            residual_layers=hparams['residual_layers'],
            residual_channels=hparams['residual_channels'],
            dilation_cycle_length=hparams['dilation_cycle_length'],
        )
        self.input_projection = Conv1d(in_dims, params.residual_channels, 1)
        self.diffusion_embedding = SinusoidalPosEmb(params.residual_channels)
        dim = params.residual_channels
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            Mish(),
            nn.Linear(dim * 4, dim)
        )
        self.residual_layers = nn.ModuleList([
            ResidualBlock(params.encoder_hidden, params.residual_channels, 2 ** (i % params.dilation_cycle_length))
            for i in range(params.residual_layers)
        ])
        self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
        self.output_projection = Conv1d(params.residual_channels, in_dims, 1)
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, spec, diffusion_step, cond):
        """
        :param spec: [B, 1, M, T]
        :param diffusion_step: [B, 1]
        :param cond: [B, M, T]
        :return: [B, 1, M, T]
        """
        x = spec[:, 0]
        x = self.input_projection(x)  # x [B, residual_channel, T]

        x = F.relu(x)
        diffusion_step = self.diffusion_embedding(diffusion_step)
        diffusion_step = self.mlp(diffusion_step)
        skip = []
        for layer_id, layer in enumerate(self.residual_layers):
            x, skip_connection = layer(x, cond, diffusion_step)
            skip.append(skip_connection)

        x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
        x = self.skip_projection(x)
        x = F.relu(x)
        x = self.output_projection(x)  # [B, 80, T]
        return x[:, None, :, :]
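

if __name__ == '__main__':
    # Minimal smoke-test sketch. It assumes `hparams` behaves as a plain dict
    # that can be filled in directly here; in the full project it is normally
    # populated by the project's config loader. The values below are
    # illustrative, not a released configuration.
    hparams.update({
        'hidden_size': 256,
        'residual_layers': 20,
        'residual_channels': 256,
        'dilation_cycle_length': 4,
    })
    model = DiffNet(in_dims=80)
    spec = torch.randn(2, 1, 80, 100)   # [B, 1, M, T] noisy mel-spectrogram
    step = torch.randint(0, 100, (2,))  # [B] diffusion step indices
    cond = torch.randn(2, 256, 100)     # [B, hidden_size, T] encoder condition
    out = model(spec, step, cond)
    print(out.shape)                    # torch.Size([2, 1, 80, 100])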