Merge pull request #191 from magic-akari/4.1-Stable

feat: add compress_model
This commit is contained in:
YuriHead 2023-05-25 14:28:48 +08:00 committed by GitHub
commit 5279111f39
3 changed files with 89 additions and 0 deletions


@@ -293,6 +293,15 @@ The existing steps before clustering do not need to be changed. All you need to
### [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.1-Stable/sovits4_for_colab.ipynb) [sovits4_for_colab.ipynb](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.1-Stable/sovits4_for_colab.ipynb)
## 🗜️ Model strip
The generated model contains the data needed to resume training. If you confirm that the model is final and will not be used for further training, you can safely strip this data to get a much smaller file (about 1/3 of the original size).
Use [compress_model.py](compress_model.py):
```shell
# Example
python compress_model.py -c="configs/config.json" -i="logs/44k/G_30400.pth" -o="logs/44k/release.pth"
```
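In essence, stripping keeps only the `model` weights from the training checkpoint and discards the accumulated optimizer state. A minimal illustrative sketch of the idea (paths taken from the example above; the actual compress_model.py, shown at the end of this diff, additionally writes a freshly initialized optimizer state and placeholder metadata so the usual checkpoint loader still finds the keys it expects):
```python
# Illustrative only -- prefer compress_model.py. Load a training checkpoint,
# keep just the generator weights, and save a much smaller release file.
import torch

ckpt = torch.load("logs/44k/G_30400.pth", map_location="cpu")
torch.save({"model": ckpt["model"], "iteration": 0, "learning_rate": 0.0001},
           "logs/44k/release.pth")
```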
## 📤 Exporting to Onnx
Use [onnx_export.py](https://github.com/svc-develop-team/so-vits-svc/blob/4.0/onnx_export.py)


@@ -295,6 +295,17 @@ python inference_main.py -m "logs/44k/G_30400.pth" -c "configs/config.json" -n "
### [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.1-Stable/sovits4_for_colab.ipynb) [sovits4_for_colab.ipynb](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.1-Stable/sovits4_for_colab.ipynb)
## 🗜️ Model compression
The generated model contains the information needed to resume training. If you are sure the model will not be trained further, you can remove this information to obtain a final model about 1/3 of the original size.
Use [compress_model.py](compress_model.py)
```shell
# Example
python compress_model.py -c="configs/config.json" -i="logs/44k/G_30400.pth" -o="logs/44k/release.pth"
```
## 📤 Exporting to Onnx
Use [onnx_export.py](onnx_export.py)

compress_model.py (new file, 69 lines)

@@ -0,0 +1,69 @@
from collections import OrderedDict

import torch

import utils
from models import SynthesizerTrn


def copyStateDict(state_dict):
    # Strip a leading "module." prefix (added by DataParallel) from the keys,
    # if present, so the state dict can be loaded into a bare model.
    if list(state_dict.keys())[0].startswith('module'):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = '.'.join(k.split('.')[start_idx:])
        new_state_dict[name] = v
    return new_state_dict


def removeOptimizer(config: str, input_model: str, output_model: str):
    hps = utils.get_hparams_from_file(config)

    # Build a generator with the same architecture as the checkpoint so a
    # freshly initialized optimizer state of the right shape can be written out.
    net_g = SynthesizerTrn(hps.data.filter_length // 2 + 1,
                           hps.train.segment_size // hps.data.hop_length,
                           **hps.model)

    optim_g = torch.optim.AdamW(net_g.parameters(),
                                hps.train.learning_rate,
                                betas=hps.train.betas,
                                eps=hps.train.eps)

    state_dict_g = torch.load(input_model, map_location="cpu")
    new_dict_g = copyStateDict(state_dict_g)

    # Keep only the generator weights; the accumulated optimizer state is what
    # makes the training checkpoint large.
    keys = []
    for k, v in new_dict_g['model'].items():
        keys.append(k)

    new_dict_g = {k: new_dict_g['model'][k] for k in keys}

    torch.save(
        {
            'model': new_dict_g,
            'iteration': 0,
            'optimizer': optim_g.state_dict(),
            'learning_rate': 0.0001
        }, output_model)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--config",
                        type=str,
                        default='configs/config.json')
    parser.add_argument("-i", "--input", type=str)
    parser.add_argument("-o", "--output", type=str, default=None)

    args = parser.parse_args()

    output = args.output

    # Default output path: append "_release" to the input filename.
    if output is None:
        import os.path
        filename, ext = os.path.splitext(args.input)
        output = filename + "_release" + ext

    removeOptimizer(args.config, args.input, output)
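
As a quick sanity check (a sketch only; paths follow the README example above), the stripped file should load with identical generator weights while being roughly a third of the original size:
```python
# Compare the original training checkpoint with the stripped release file.
import os
import torch

orig = torch.load("logs/44k/G_30400.pth", map_location="cpu")
small = torch.load("logs/44k/release.pth", map_location="cpu")

# Generator weights must be unchanged, key for key.
assert orig["model"].keys() == small["model"].keys()
assert all(torch.equal(orig["model"][k], small["model"][k]) for k in small["model"])

print("original :", os.path.getsize("logs/44k/G_30400.pth") // 2**20, "MiB")
print("stripped :", os.path.getsize("logs/44k/release.pth") // 2**20, "MiB")
```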