import argparse
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchaudio
import torchaudio.transforms as T
import tqdm
from torch.utils.data import DataLoader, random_split

import AudioUtils
import file_utils as Data
from data import AudioDataset
from discriminator import SISUDiscriminator
from generator import SISUGenerator
from training_utils import discriminator_train, generator_train

# Init script argument parser
parser = argparse.ArgumentParser(description="Training script")
parser.add_argument("--generator", type=str, default=None,
                    help="Path to the generator model file")
parser.add_argument("--discriminator", type=str, default=None,
                    help="Path to the discriminator model file")
parser.add_argument("--device", type=str, default="cpu", help="Select device")
parser.add_argument("--epoch", type=int, default=0, help="Current epoch for model versioning")
parser.add_argument("--debug", action="store_true", help="Print debug logs")
parser.add_argument("--continue_training", action="store_true", help="Continue training using temp_generator and temp_discriminator models")

args = parser.parse_args()
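
# Example invocations (the script filename train.py is an assumption):
#   python train.py --device cuda --debug
#   python train.py --continue_training --device cuda
#   python train.py --generator models/temp_generator.pt --discriminator models/temp_discriminator.pt
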
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Parameters
sample_rate = 44100
n_fft = 2048
hop_length = 256
win_length = n_fft
n_mels = 128
n_mfcc = 20  # If using MFCC

mfcc_transform = T.MFCC(
    sample_rate=sample_rate,
    n_mfcc=n_mfcc,
    melkwargs={'n_fft': n_fft, 'hop_length': hop_length}
).to(device)

mel_transform = T.MelSpectrogram(
    sample_rate=sample_rate, n_fft=n_fft, hop_length=hop_length,
    win_length=win_length, n_mels=n_mels, power=1.0  # Magnitude Mel
).to(device)

stft_transform = T.Spectrogram(
    n_fft=n_fft, win_length=win_length, hop_length=hop_length
).to(device)
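
# Note: the three transforms above are passed to generator_train below; judging by the
# returned mel_l1_tensor / log_stft_l1_tensor / mfcc_l_tensor values, they appear to be
# used for spectral-domain L1 reconstruction losses alongside the adversarial loss.
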
debug = args.debug

# Initialize dataset and dataloader
dataset_dir = './dataset/good'
dataset = AudioDataset(dataset_dir, device)
models_dir = "models"
os.makedirs(models_dir, exist_ok=True)
audio_output_dir = "output"
os.makedirs(audio_output_dir, exist_ok=True)

# ========= SINGLE =========

train_data_loader = DataLoader(dataset, batch_size=64, shuffle=True)
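
# Note: each batch yielded by train_data_loader is expected to be a
# (high_quality_clip, low_quality_clip) pair, where clip[0] is the waveform batch and
# clip[1] the corresponding sample rate(s) -- see the training loop below.
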
# ========= MODELS =========

generator = SISUGenerator()
discriminator = SISUDiscriminator()

epoch: int = args.epoch
epoch_from_file = Data.read_data(f"{models_dir}/epoch_data.json")

if args.continue_training:
    generator.load_state_dict(torch.load(f"{models_dir}/temp_generator.pt", map_location=device, weights_only=True))
    discriminator.load_state_dict(torch.load(f"{models_dir}/temp_discriminator.pt", map_location=device, weights_only=True))
    epoch = epoch_from_file["epoch"] + 1
else:
    if args.generator is not None:
        generator.load_state_dict(torch.load(args.generator, map_location=device, weights_only=True))
    if args.discriminator is not None:
        discriminator.load_state_dict(torch.load(args.discriminator, map_location=device, weights_only=True))

generator = generator.to(device)
discriminator = discriminator.to(device)

# Loss
criterion_g = nn.BCEWithLogitsLoss()
criterion_d = nn.BCEWithLogitsLoss()

# Optimizers
optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))

# Scheduler
scheduler_g = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_g, mode='min', factor=0.5, patience=5)
scheduler_d = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min', factor=0.5, patience=5)
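
# Note: ReduceLROnPlateau halves the learning rate once the metric passed to .step()
# has not improved for `patience` consecutive calls. In the loop below, .step() is
# called once per batch, so patience=5 counts batches rather than epochs.
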
def start_training():
    generator_epochs = 5000
    for generator_epoch in range(generator_epochs):
        low_quality_audio = (torch.empty((1)), 1)
        high_quality_audio = (torch.empty((1)), 1)
        ai_enhanced_audio = (torch.empty((1)), 1)

        times_correct = 0

        # ========= TRAINING =========
        for high_quality_clip, low_quality_clip in tqdm.tqdm(train_data_loader, desc=f"Training epoch {generator_epoch+1}/{generator_epochs}, Current epoch {epoch+1}"):
            # for high_quality_clip, low_quality_clip in train_data_loader:
            high_quality_sample = (high_quality_clip[0], high_quality_clip[1])
            low_quality_sample = (low_quality_clip[0], low_quality_clip[1])

            # ========= LABELS =========
            batch_size = high_quality_clip[0].size(0)
            real_labels = torch.ones(batch_size, 1).to(device)
            fake_labels = torch.zeros(batch_size, 1).to(device)

            # ========= DISCRIMINATOR =========
            discriminator.train()
            d_loss = discriminator_train(
                high_quality_sample,
                low_quality_sample,
                real_labels,
                fake_labels,
                discriminator,
                generator,
                criterion_d,
                optimizer_d
            )

            # ========= GENERATOR =========
            generator.train()
            generator_output, combined_loss, adversarial_loss, mel_l1_tensor, log_stft_l1_tensor, mfcc_l_tensor = generator_train(
                low_quality_sample,
                high_quality_sample,
                real_labels,
                generator,
                discriminator,
                criterion_d,
                optimizer_g,
                device,
                mel_transform,
                stft_transform,
                mfcc_transform
            )

            if debug:
                print(f"D_LOSS: {d_loss.item():.4f}, COMBINED_LOSS: {combined_loss.item():.4f}, ADVERSARIAL_LOSS: {adversarial_loss.item():.4f}, MEL_L1_LOSS: {mel_l1_tensor.item():.4f}, LOG_STFT_L1_LOSS: {log_stft_l1_tensor.item():.4f}, MFCC_LOSS: {mfcc_l_tensor.item():.4f}")
            scheduler_d.step(d_loss.detach())
            scheduler_g.step(adversarial_loss.detach())

            # ========= SAVE LATEST AUDIO =========
            high_quality_audio = (high_quality_clip[0][0], high_quality_clip[1][0])
            low_quality_audio = (low_quality_clip[0][0], low_quality_clip[1][0])
            ai_enhanced_audio = (generator_output[0], high_quality_clip[1][0])

        new_epoch = generator_epoch + epoch

        if generator_epoch % 25 == 0:
            print(f"Saved epoch {new_epoch}!")
            # The degraded clip is saved at the original (high-quality) sample rate because
            # it was resampled in data.py from the original rate down and back up again.
            torchaudio.save(f"{audio_output_dir}/epoch-{new_epoch}-audio-crap.wav", low_quality_audio[0].cpu().detach(), high_quality_audio[1])
            torchaudio.save(f"{audio_output_dir}/epoch-{new_epoch}-audio-ai.wav", ai_enhanced_audio[0].cpu().detach(), ai_enhanced_audio[1])
            torchaudio.save(f"{audio_output_dir}/epoch-{new_epoch}-audio-orig.wav", high_quality_audio[0].cpu().detach(), high_quality_audio[1])

        # if debug:
        #     print(generator.state_dict().keys())
        #     print(discriminator.state_dict().keys())
        torch.save(discriminator.state_dict(), f"{models_dir}/temp_discriminator.pt")
        torch.save(generator.state_dict(), f"{models_dir}/temp_generator.pt")
        Data.write_data(f"{models_dir}/epoch_data.json", {"epoch": new_epoch})

    torch.save(discriminator, f"{models_dir}/epoch-{generator_epochs}-discriminator.pt")
    torch.save(generator, f"{models_dir}/epoch-{generator_epochs}-generator.pt")
    print("Training complete!")

start_training()
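
# For reference, a minimal file_utils module satisfying the read_data/write_data calls
# above could look like the following (an assumption -- not part of this file):
#
#   import json
#
#   def read_data(path):
#       with open(path, "r") as f:
#           return json.load(f)
#
#   def write_data(path, data):
#       with open(path, "w") as f:
#           json.dump(data, f)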