🐛 | Fixed model and training

2024-12-18 18:08:44 +02:00
parent 3bcc356eef
commit e43b2ab7ef
3 changed files with 71 additions and 52 deletions


@@ -17,7 +17,7 @@ print(f"Using device: {device}")
 # Initialize dataset and dataloader
 dataset_dir = './dataset/good'
-dataset = AudioDataset(dataset_dir, target_duration=2.0) # 5 seconds target duration
+dataset = AudioDataset(dataset_dir, target_duration=2.0)
 dataset_size = len(dataset)
 train_size = int(dataset_size * .9)
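The context lines above imply a 90/10 train/validation split. For readers following along, a minimal sketch of how train_data_loader is typically completed from these lines; the batch size and the validation loader are assumptions, not part of this commit:

from torch.utils.data import DataLoader, random_split

# Complete the 90/10 split over the AudioDataset
val_size = dataset_size - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

# train_data_loader is what the training loop below iterates over
train_data_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)   # batch_size=4 is an assumed value
val_data_loader = DataLoader(val_dataset, batch_size=4, shuffle=False)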
@@ -35,9 +35,12 @@ discriminator = SISUDiscriminator()
 generator = generator.to(device)
 discriminator = discriminator.to(device)
-# Loss and optimizers
-criterion = nn.MSELoss() # Use Mean Squared Error loss
-optimizer_g = optim.Adam(generator.parameters(), lr=0.0005, betas=(0.5, 0.999))
+# Loss
+criterion_g = nn.L1Loss()  # L1 loss for the generator's adversarial objective (L1 instead of MSE)
+criterion_d = nn.MSELoss() # MSE kept for the discriminator (optional)
+# Optimizers
+optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999)) # Reduced learning rate (was 0.0005)
 optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))
 # Learning rate scheduler
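The scheduler constructed under this comment is cut off in the next hunk's header. A typical ReduceLROnPlateau setup consistent with the visible part might look like this; factor and patience are assumed values, not taken from the commit:

from torch.optim import lr_scheduler

# Halve the discriminator learning rate when the monitored loss stops improving
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min', factor=0.5, patience=5)

# Unlike most schedulers, ReduceLROnPlateau must be fed a metric each epoch:
# scheduler.step(epoch_d_loss)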
@@ -45,11 +48,16 @@ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min',
 # Training loop
 num_epochs = 500
+lambda_gp = 10  # Gradient-penalty weight (not yet used below)
 for epoch in range(num_epochs):
-    original = torch.empty((2))
-    crap_audio = torch.empty((2))
-    for low_quality, high_quality in tqdm.tqdm(train_data_loader):
+    low_quality_audio = torch.empty((1))
+    high_quality_audio = torch.empty((1))
+    ai_enhanced_audio = torch.empty((1))
+    total_d_loss = 0
+    total_g_loss = 0
+    # Training
+    for low_quality, high_quality in tqdm.tqdm(train_data_loader, desc=f"Epoch {epoch+1}/{num_epochs}"):
         high_quality = high_quality.to(device)
         low_quality = low_quality.to(device)
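The new total_d_loss / total_g_loss accumulators suggest per-epoch averages, but the diff never shows where they are consumed. A plausible end-of-epoch use, offered as an assumption about intent (the avg_* names are hypothetical):

# At the end of each epoch, after the inner dataloader loop:
avg_d_loss = total_d_loss / len(train_data_loader)
avg_g_loss = total_g_loss / len(train_data_loader)
print(f"Epoch {epoch+1}: avg D loss {avg_d_loss:.4f}, avg G loss {avg_g_loss:.4f}")
scheduler.step(avg_d_loss)  # feed ReduceLROnPlateau the epoch-average discriminator loss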
@@ -57,32 +65,44 @@ for epoch in range(num_epochs):
         real_labels = torch.ones(batch_size, 1).to(device)
         fake_labels = torch.zeros(batch_size, 1).to(device)
-        real_outputs = discriminator(high_quality)
-        fake_outputs = discriminator(generator(low_quality))
+        ###### Train Discriminator ######
         discriminator.train()
         optimizer_d.zero_grad()
-        d_loss_real = criterion(real_outputs, real_labels)
-        d_loss_fake = criterion(fake_outputs, fake_labels)
-        d_loss = (d_loss_real + d_loss_fake) * 0.5
+        # 1. Real data
+        real_outputs = discriminator(high_quality)
+        d_loss_real = criterion_d(real_outputs, real_labels)
+        # 2. Fake data
+        fake_audio = generator(low_quality)
+        fake_outputs = discriminator(fake_audio.detach())  # Detach so discriminator gradients don't flow into the generator
+        d_loss_fake = criterion_d(fake_outputs, fake_labels)
+        d_loss = (d_loss_real + d_loss_fake) / 2.0  # Plain average, without the gradient penalty
         d_loss.backward()
         optimizer_d.step()
+        total_d_loss += d_loss.item()
         # Train Generator
         generator.train()
         optimizer_g.zero_grad()
-        fake_audio = generator(low_quality)
-        fake_outputs = discriminator(fake_audio)
-        g_loss = criterion(fake_outputs, real_labels)
+        # Generator loss: how well the fake audio fools the discriminator
+        fake_outputs = discriminator(fake_audio)  # No detach here, so gradients reach the generator
+        g_loss = criterion_g(fake_outputs, real_labels)  # Push generator outputs toward the "real" label
         g_loss.backward()
         optimizer_g.step()
+        total_g_loss += g_loss.item()
-        original = high_quality
-        crap_audio = fake_audio
+        low_quality_audio = low_quality
+        high_quality_audio = high_quality
+        ai_enhanced_audio = fake_audio.detach()  # Detach before saving: torchaudio.save cannot serialize tensors that require grad
     if epoch % 10 == 0:
-        print(crap_audio.size())
-        torchaudio.save(f"./epoch-{epoch}-audio.wav", crap_audio[0].cpu(), 44100)
-        torchaudio.save(f"./epoch-{epoch}-audio-orig.wav", original[0].cpu(), 44100)
         print(f'Epoch [{epoch+1}/{num_epochs}]')
+        print(f"Saved epoch {epoch}!")
+        torchaudio.save(f"./output/epoch-{epoch}-audio-crap.wav", low_quality_audio[0].cpu(), 44100)
+        torchaudio.save(f"./output/epoch-{epoch}-audio-ai.wav", ai_enhanced_audio[0].cpu(), 44100)
+        torchaudio.save(f"./output/epoch-{epoch}-audio-orig.wav", high_quality_audio[0].cpu(), 44100)
     torch.save(generator.state_dict(), "generator.pt")
     torch.save(discriminator.state_dict(), "discriminator.pt")
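lambda_gp = 10 is introduced by this commit but never used, and the new d_loss line is explicitly computed without a gradient penalty. If a WGAN-GP-style penalty is the plan, a standard implementation looks roughly like this; a sketch, not code from this repository:

import torch

def gradient_penalty(discriminator, real, fake, device):
    # Random per-sample interpolation between real and generated audio
    alpha = torch.rand(real.size(0), 1, 1, device=device)  # assumes (batch, channels, samples) tensors
    interpolated = (alpha * real + (1 - alpha) * fake.detach()).requires_grad_(True)
    d_out = discriminator(interpolated)
    # Gradient of the discriminator output w.r.t. the interpolated input
    gradients = torch.autograd.grad(
        outputs=d_out,
        inputs=interpolated,
        grad_outputs=torch.ones_like(d_out),
        create_graph=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    # Penalize deviation of the gradient norm from 1
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean()

# Usage inside the discriminator step:
#   gp = gradient_penalty(discriminator, high_quality, fake_audio, device)
#   d_loss = (d_loss_real + d_loss_fake) / 2.0 + lambda_gp * gp

Note that a gradient penalty is conventionally paired with a Wasserstein critic loss rather than MSE against 0/1 labels, so adopting it would likely also mean changing criterion_d.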