✨ | Made training a bit... spicier.
training.py (387 lines changed)
@@ -1,65 +1,74 @@
-import argparse
-import os
-
 import torch
 import torch.nn as nn
 import torch.optim as optim
 import torch.nn.functional as F
 import torchaudio
 import torchaudio.transforms as T
 import tqdm
+
+import argparse
+import math
+import os

-from torch.utils.data import random_split
+from torch.amp import GradScaler, autocast
 from torch.utils.data import DataLoader

-import AudioUtils
 import training_utils
 from data import AudioDataset
-from generator import SISUGenerator
 from discriminator import SISUDiscriminator
+from generator import SISUGenerator
+from training_utils import discriminator_train, generator_train
 import file_utils as Data

-import torchaudio.transforms as T
-
-# Init script argument parser
-parser = argparse.ArgumentParser(description="Training script")
-parser.add_argument("--generator", type=str, default=None,
-                    help="Path to the generator model file")
-parser.add_argument("--discriminator", type=str, default=None,
-                    help="Path to the discriminator model file")
-parser.add_argument("--device", type=str, default="cpu", help="Select device")
-parser.add_argument("--epoch", type=int, default=0, help="Current epoch for model versioning")
-parser.add_argument("--debug", action="store_true", help="Print debug logs")
-parser.add_argument("--continue_training", action="store_true", help="Continue training using temp_generator and temp_discriminator models")
+# ---------------------------
+# Argument parsing
+# ---------------------------
+parser = argparse.ArgumentParser(description="Training script (safer defaults)")
+parser.add_argument("--resume", action="store_true", help="Resume training")
+parser.add_argument(
+    "--device", type=str, default="cuda", help="Device (cuda, cpu, mps)"
+)
+parser.add_argument(
+    "--epochs", type=int, default=5000, help="Number of training epochs"
+)
+parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
+parser.add_argument("--num_workers", type=int, default=2, help="DataLoader num_workers")
+parser.add_argument("--debug", action="store_true", help="Print debug logs")
+parser.add_argument(
+    "--no_pin_memory", action="store_true", help="Disable pin_memory even on CUDA"
+)
 args = parser.parse_args()

-device = torch.device(args.device if torch.cuda.is_available() else "cpu")
+# ---------------------------
+# Device setup
+# ---------------------------
+# Use requested device only if available
+device = torch.device(
+    args.device if (args.device != "cuda" or torch.cuda.is_available()) else "cpu"
+)
 print(f"Using device: {device}")
+# sensible performance flags
+if device.type == "cuda":
+    torch.backends.cudnn.benchmark = True
+    # optional: torch.set_float32_matmul_precision("high")
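+    # cudnn.benchmark times candidate convolution kernels on the first batches
+    # and caches the fastest; a win when input shapes are fixed, a cost when
+    # they change from batch to batch.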
+debug = args.debug

-# Parameters
+# ---------------------------
+# Audio transforms
+# ---------------------------
 sample_rate = 44100
 n_fft = 1024
 win_length = n_fft
 hop_length = n_fft // 4
-n_mels = 40
-n_mfcc = 13
+n_mels = 96
+# n_mfcc = 13
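+# 96 mel bands (up from 40) give the mel L1 loss finer frequency resolution at
+# 44.1 kHz; the old 40-band setting matched the now-disabled MFCC transform.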

-mfcc_transform = T.MFCC(
-    sample_rate=sample_rate,
-    n_mfcc=n_mfcc,
-    melkwargs={
-        'n_fft': n_fft,
-        'hop_length': hop_length,
-        'win_length': win_length,
-        'n_mels': n_mels,
-        'power': 1.0,
-    }
-).to(device)
+# mfcc_transform = T.MFCC(
+#     sample_rate=sample_rate,
+#     n_mfcc=n_mfcc,
+#     melkwargs=dict(
+#         n_fft=n_fft,
+#         hop_length=hop_length,
+#         win_length=win_length,
+#         n_mels=n_mels,
+#         power=1.0,
+#     ),
+# ).to(device)
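+# The MFCC path is kept only as a comment: training_utils.init below is called
+# without mfcc_transform, and generator_train no longer computes an MFCC loss.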

 mel_transform = T.MelSpectrogram(
     sample_rate=sample_rate,
@@ -67,138 +76,198 @@ mel_transform = T.MelSpectrogram(
     hop_length=hop_length,
     win_length=win_length,
     n_mels=n_mels,
-    power=1.0  # Magnitude Mel
+    power=1.0,
 ).to(device)

 stft_transform = T.Spectrogram(
-    n_fft=n_fft,
-    win_length=win_length,
-    hop_length=hop_length
+    n_fft=n_fft, win_length=win_length, hop_length=hop_length
 ).to(device)

-debug = args.debug
-
-# Initialize dataset and dataloader
-dataset_dir = './dataset/good'
-dataset = AudioDataset(dataset_dir, device)
-models_dir = "./models"
-os.makedirs(models_dir, exist_ok=True)
-audio_output_dir = "./output"
-os.makedirs(audio_output_dir, exist_ok=True)
+# training_utils.init(mel_transform, stft_transform, mfcc_transform)
+training_utils.init(mel_transform, stft_transform)

-# ========= SINGLE =========
+# ---------------------------
+# Dataset / DataLoader
+# ---------------------------
+dataset_dir = "./dataset/good"
+dataset = AudioDataset(dataset_dir)

-train_data_loader = DataLoader(dataset, batch_size=2048, shuffle=True, num_workers=24)
+train_loader = DataLoader(
+    dataset,
+    batch_size=args.batch_size,
+    shuffle=True,
+    num_workers=args.num_workers,
+    pin_memory=(device.type == "cuda" and not args.no_pin_memory),  # honor the flag
+    persistent_workers=(args.num_workers > 0),  # invalid when num_workers == 0
+)
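+# pin_memory allocates page-locked host buffers so the .to(device, non_blocking=True)
+# copies in the loop below can overlap with compute; it only pays off on CUDA,
+# hence the guard on device.type above.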

-# ========= MODELS =========
-generator = SISUGenerator()
-discriminator = SISUDiscriminator()
-
-generator = torch.compile(generator)
-discriminator = torch.compile(discriminator)
-
-epoch: int = args.epoch
-
-if args.continue_training:
-    if args.generator is not None:
-        generator.load_state_dict(torch.load(args.generator, map_location=device, weights_only=True))
-    elif args.discriminator is not None:
-        discriminator.load_state_dict(torch.load(args.discriminator, map_location=device, weights_only=True))
-    else:
-        generator.load_state_dict(torch.load(f"{models_dir}/temp_generator.pt", map_location=device, weights_only=True))
-        discriminator.load_state_dict(torch.load(f"{models_dir}/temp_discriminator.pt", map_location=device, weights_only=True))
-
-        epoch_from_file = Data.read_data(f"{models_dir}/epoch_data.json")
-        epoch = epoch_from_file["epoch"] + 1
-
-generator = generator.to(device)
-discriminator = discriminator.to(device)
+# ---------------------------
+# Models
+# ---------------------------
+generator = SISUGenerator().to(device)
+discriminator = SISUDiscriminator().to(device)

-# Loss
+# ---------------------------
+# Losses / Optimizers / Scalers
+# ---------------------------
 criterion_g = nn.BCEWithLogitsLoss()
 criterion_d = nn.BCEWithLogitsLoss()

-# Optimizers
-optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
-optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))
+optimizer_g = optim.AdamW(
+    generator.parameters(), lr=0.0003, betas=(0.5, 0.999), weight_decay=0.0001
+)
+optimizer_d = optim.AdamW(
+    discriminator.parameters(), lr=0.0003, betas=(0.5, 0.999), weight_decay=0.0001
+)

-# Scheduler
-scheduler_g = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_g, mode='min', factor=0.5, patience=5)
-scheduler_d = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min', factor=0.5, patience=5)
+# Use modern GradScaler signature; choose device_type based on runtime device.
+scaler = GradScaler(device=device.type)
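+# float16 gradients can underflow to zero; the scaler multiplies each loss
+# before backward() and unscales before the optimizer step, skipping steps
+# whose gradients contain inf/NaN.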

-def start_training():
-    generator_epochs = 5000
-    for generator_epoch in range(generator_epochs):
-        high_quality_audio = ([torch.empty((1))], 1)
-        low_quality_audio = ([torch.empty((1))], 1)
-        ai_enhanced_audio = ([torch.empty((1))], 1)
+scheduler_g = torch.optim.lr_scheduler.ReduceLROnPlateau(
+    optimizer_g, mode="min", factor=0.5, patience=5
+)
+scheduler_d = torch.optim.lr_scheduler.ReduceLROnPlateau(
+    optimizer_d, mode="min", factor=0.5, patience=5
+)
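+# Note: the schedulers now step once per epoch on mean losses (see the loop
+# below) rather than once per batch, so patience=5 means five epochs without
+# improvement before the learning rate is halved.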

-        times_correct = 0
-
-        # ========= TRAINING =========
-        for training_data in tqdm.tqdm(train_data_loader, desc=f"Training epoch {generator_epoch+1}/{generator_epochs}, Current epoch {epoch+1}"):
-            ## Data structure:
-            # [[[float..., float..., float...], [float..., float..., float...]], [original_sample_rate, mangled_sample_rate]]
-
-            # ========= LABELS =========
-            good_quality_data = training_data[0][0].to(device)
-            bad_quality_data = training_data[0][1].to(device)
-            original_sample_rate = training_data[1][0]
-            mangled_sample_rate = training_data[1][1]
-
-            batch_size = good_quality_data.size(0)
-            real_labels = torch.ones(batch_size, 1).to(device)
-            fake_labels = torch.zeros(batch_size, 1).to(device)
-
-            high_quality_audio = (good_quality_data, original_sample_rate)
-            low_quality_audio = (bad_quality_data, mangled_sample_rate)
-
-            # ========= DISCRIMINATOR =========
-            discriminator.train()
-            d_loss = discriminator_train(
-                good_quality_data,
-                bad_quality_data,
-                real_labels,
-                fake_labels,
-                discriminator,
-                generator,
-                criterion_d,
-                optimizer_d
-            )
-
-            # ========= GENERATOR =========
-            generator.train()
-            generator_output, combined_loss, adversarial_loss, mel_l1_tensor, log_stft_l1_tensor, mfcc_l_tensor = generator_train(
-                bad_quality_data,
-                good_quality_data,
-                real_labels,
-                generator,
-                discriminator,
-                criterion_d,
-                optimizer_g,
-                device,
-                mel_transform,
-                stft_transform,
-                mfcc_transform
-            )
-
-            if debug:
-                print(f"D_LOSS: {d_loss.item():.4f}, COMBINED_LOSS: {combined_loss.item():.4f}, ADVERSARIAL_LOSS: {adversarial_loss.item():.4f}, MEL_L1_LOSS: {mel_l1_tensor.item():.4f}, LOG_STFT_L1_LOSS: {log_stft_l1_tensor.item():.4f}, MFCC_LOSS: {mfcc_l_tensor.item():.4f}")
-            scheduler_d.step(d_loss.detach())
-            scheduler_g.step(adversarial_loss.detach())
-
-            # ========= SAVE LATEST AUDIO =========
-            high_quality_audio = (good_quality_data, original_sample_rate)
-            low_quality_audio = (bad_quality_data, original_sample_rate)
-            ai_enhanced_audio = (generator_output, original_sample_rate)
-
-        torch.save(discriminator.state_dict(), f"{models_dir}/temp_discriminator.pt")
-        torch.save(generator.state_dict(), f"{models_dir}/temp_generator.pt")
-
-        new_epoch = generator_epoch+epoch
-        Data.write_data(f"{models_dir}/epoch_data.json", {"epoch": new_epoch})

-    torch.save(discriminator, "models/epoch-5000-discriminator.pt")
-    torch.save(generator, "models/epoch-5000-generator.pt")
-    print("Training complete!")
+# ---------------------------
+# Checkpoint helpers
+# ---------------------------
+models_dir = "./models"
+os.makedirs(models_dir, exist_ok=True)
+
+def save_ckpt(path, epoch):
+    torch.save(
+        {
+            "epoch": epoch,
+            "G": generator.state_dict(),
+            "D": discriminator.state_dict(),
+            "optG": optimizer_g.state_dict(),
+            "optD": optimizer_d.state_dict(),
+            "scaler": scaler.state_dict(),
+            "schedG": scheduler_g.state_dict(),
+            "schedD": scheduler_d.state_dict(),
+        },
+        path,
+    )
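+# Example: save_ckpt(os.path.join(models_dir, "last.pt"), epoch) captures model,
+# optimizer, scaler, and scheduler state in one file, so --resume restores the
+# full training state instead of just the weights.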

-start_training()
+start_epoch = 0
+if args.resume:
+    ckpt = torch.load(os.path.join(models_dir, "last.pt"), map_location=device)
+    generator.load_state_dict(ckpt["G"])
+    discriminator.load_state_dict(ckpt["D"])
+    optimizer_g.load_state_dict(ckpt["optG"])
+    optimizer_d.load_state_dict(ckpt["optD"])
+    scaler.load_state_dict(ckpt["scaler"])
+    scheduler_g.load_state_dict(ckpt["schedG"])
+    scheduler_d.load_state_dict(ckpt["schedD"])
+    start_epoch = ckpt.get("epoch", 0) + 1  # resume after the last completed epoch
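+    # load_state_dict is strict by default, so a checkpoint from a different
+    # architecture revision fails loudly here instead of training on silently
+    # mismatched weights.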

+# ---------------------------
+# Training loop (safer)
+# ---------------------------
+
+if not train_loader or not train_loader.batch_size:
+    print("There is no data to train with! Exiting...")
+    exit()
+
+max_batch = max(1, train_loader.batch_size)
+real_buf = torch.full((max_batch, 1), 0.9, device=device)  # label smoothing
+fake_buf = torch.zeros(max_batch, 1, device=device)
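+# One-sided label smoothing: real targets of 0.9 keep the discriminator from
+# saturating; both label buffers are allocated once at the maximum batch size
+# and sliced per batch instead of being re-created every step.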

+try:
+    for epoch in range(start_epoch, args.epochs):
+        generator.train()
+        discriminator.train()
+
+        running_d, running_g, steps = 0.0, 0.0, 0
+
+        for i, (
+            (high_quality, low_quality),
+            (high_sample_rate, low_sample_rate),
+        ) in enumerate(tqdm.tqdm(train_loader, desc=f"Epoch {epoch}")):
+            batch_size = high_quality.size(0)
+
+            high_quality = high_quality.to(device, non_blocking=True)
+            low_quality = low_quality.to(device, non_blocking=True)
+
+            real_labels = real_buf[:batch_size]
+            fake_labels = fake_buf[:batch_size]
+
+            # --- Discriminator ---
+            optimizer_d.zero_grad(set_to_none=True)
+            with autocast(device_type=device.type):
+                d_loss = discriminator_train(
+                    high_quality,
+                    low_quality,
+                    real_labels,
+                    fake_labels,
+                    discriminator,
+                    generator,
+                    criterion_d,
+                )
+
+            scaler.scale(d_loss).backward()
+            scaler.unscale_(optimizer_d)
+            torch.nn.utils.clip_grad_norm_(discriminator.parameters(), 1.0)
+            scaler.step(optimizer_d)
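+            # unscale_ must precede clip_grad_norm_ so clipping sees true
+            # gradient magnitudes rather than scaled ones; scaler.step then
+            # skips the update if any gradient is inf/NaN.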

+            # --- Generator ---
+            optimizer_g.zero_grad(set_to_none=True)
+            with autocast(device_type=device.type):
+                g_out, g_total, g_adv = generator_train(
+                    low_quality,
+                    high_quality,
+                    real_labels,
+                    generator,
+                    discriminator,
+                    criterion_d,
+                )
+
+            scaler.scale(g_total).backward()
+            scaler.unscale_(optimizer_g)
+            torch.nn.utils.clip_grad_norm_(generator.parameters(), 1.0)
+            scaler.step(optimizer_g)
+
+            scaler.update()
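+            # A single update() per iteration, after both optimizer steps, so D
+            # and G share one loss scale and its growth/backoff schedule.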

+            running_d += float(d_loss.detach().cpu().item())
+            running_g += float(g_total.detach().cpu().item())
+            steps += 1
+
+        # epoch averages & schedulers
+        if steps == 0:
+            print("No steps in epoch (empty dataloader?). Exiting.")
+            break
+
+        mean_d = running_d / steps
+        mean_g = running_g / steps
+
+        scheduler_d.step(mean_d)
+        scheduler_g.step(mean_g)
+
+        save_ckpt(os.path.join(models_dir, "last.pt"), epoch)
+        print(f"Epoch {epoch} done | D {mean_d:.4f} | G {mean_g:.4f}")
+
+except Exception:
+    try:
+        save_ckpt(os.path.join(models_dir, "crash_last.pt"), epoch)
+        print(f"Saved crash checkpoint for epoch {epoch}")
+    except Exception as e:
+        print("Failed saving crash checkpoint:", e)
+    raise
+
+try:
+    torch.save(generator.state_dict(), os.path.join(models_dir, "final_generator.pt"))
+    torch.save(
+        discriminator.state_dict(), os.path.join(models_dir, "final_discriminator.pt")
+    )
+except Exception as e:
+    print("Failed to save final states:", e)
+
+print("Training finished.")