🐛 | Fixed discriminator outputting nonsense.

2024-12-19 21:26:23 +02:00
parent e43b2ab7ef
commit 1fa2a13091
4 changed files with 34 additions and 25 deletions

@@ -25,8 +25,8 @@ val_size = int(dataset_size-train_size)
 train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
-train_data_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
-val_data_loader = DataLoader(val_dataset, batch_size=4, shuffle=True)
+train_data_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
+val_data_loader = DataLoader(val_dataset, batch_size=1, shuffle=True)
 # Initialize models and move them to device
 generator = SISUGenerator()
@@ -36,16 +36,13 @@ generator = generator.to(device)
 discriminator = discriminator.to(device)
 # Loss
-criterion_g = nn.L1Loss() # Perceptual Loss (L1 instead of MSE)
-criterion_d = nn.MSELoss() # Can keep MSE for discriminator (optional)
+criterion_g = nn.L1Loss()
+criterion_d = nn.BCEWithLogitsLoss()
 # Optimizers
-optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999)) # Reduced learning rate
+optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
 optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))
-# Learning rate scheduler
-scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min', factor=0.1, patience=5)
 # Training loop
 num_epochs = 500
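The central change in this hunk is swapping nn.MSELoss() for nn.BCEWithLogitsLoss() as the discriminator criterion. BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy, so it expects the discriminator to emit raw, unnormalized logits. A minimal sketch of how the new criterion is called; the tensor shapes are illustrative and assume one logit per sample, which is not confirmed by the diff itself:

import torch
import torch.nn as nn

# BCEWithLogitsLoss applies sigmoid internally, so the discriminator's
# last layer should output raw logits (no explicit sigmoid).
criterion_d = nn.BCEWithLogitsLoss()

logits = torch.randn(1, 1)       # illustrative raw discriminator output, batch of 1
real_labels = torch.ones(1, 1)   # target for real (high-quality) audio
fake_labels = torch.zeros(1, 1)  # target for generated audio

loss_real = criterion_d(logits, real_labels)
loss_fake = criterion_d(logits, fake_labels)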
@@ -61,7 +58,7 @@ for epoch in range(num_epochs):
 high_quality = high_quality.to(device)
 low_quality = low_quality.to(device)
-batch_size = high_quality.size(0)
+batch_size = 1
 real_labels = torch.ones(batch_size, 1).to(device)
 fake_labels = torch.zeros(batch_size, 1).to(device)
@@ -75,7 +72,7 @@ for epoch in range(num_epochs):
 # 2. Fake data
 fake_audio = generator(low_quality)
-fake_outputs = discriminator(fake_audio.detach()) # Detach to stop gradient flow to the generator
+fake_outputs = discriminator(fake_audio.detach())
 d_loss_fake = criterion_d(fake_outputs, fake_labels)
 d_loss = (d_loss_real + d_loss_fake) / 2.0 # Without gradient penalty
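For context, the detached fake pass above is the usual two-step GAN update: detach() keeps the discriminator's loss from backpropagating into the generator. A runnable sketch under stated assumptions; the stand-in 1-D conv modules below are placeholders, not the repo's SISUGenerator/SISUDiscriminator:

import torch
import torch.nn as nn
import torch.optim as optim

# Stand-in modules for illustration only; the real models are defined elsewhere.
generator = nn.Conv1d(1, 1, kernel_size=3, padding=1)
discriminator = nn.Sequential(
    nn.Conv1d(1, 16, kernel_size=3, padding=1),
    nn.AdaptiveAvgPool1d(1),
    nn.Flatten(),
    nn.Linear(16, 1),            # one raw logit per sample
)

criterion_d = nn.BCEWithLogitsLoss()
optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))

low_quality = torch.randn(1, 1, 1024)   # dummy audio, batch size 1
high_quality = torch.randn(1, 1, 1024)
real_labels = torch.ones(1, 1)
fake_labels = torch.zeros(1, 1)

# Discriminator step: detach() stops gradients from reaching the generator.
fake_audio = generator(low_quality)
d_loss_real = criterion_d(discriminator(high_quality), real_labels)
d_loss_fake = criterion_d(discriminator(fake_audio.detach()), fake_labels)
d_loss = (d_loss_real + d_loss_fake) / 2.0

optimizer_d.zero_grad()
d_loss.backward()
optimizer_d.step()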