⚗️ | Added MultiPeriodDiscriminator implementation from HiFi-GAN

This commit is contained in:
2025-12-06 18:04:18 +02:00
parent bf0a6e58e9
commit e3e555794e
3 changed files with 187 additions and 125 deletions

View File

@@ -2,13 +2,14 @@ import torch
import torch.nn.functional as F
from utils.MultiResolutionSTFTLoss import MultiResolutionSTFTLoss
# Multi-resolution STFT loss over three resolutions (FFT 512/1024/2048).
# Settings intentionally kept as-is. NOTE(review): kwarg names suggest one
# (fft, hop, window) triple per resolution — confirm against
# utils.MultiResolutionSTFTLoss.
stft_loss_fn = MultiResolutionSTFTLoss(
    fft_sizes=[512, 1024, 2048],
    hop_sizes=[64, 128, 256],
    win_lengths=[256, 512, 1024]
)
def feature_matching_loss(fmap_r, fmap_g):
    """Compute the L1 feature-matching loss between real and fake feature maps.

    Args:
        fmap_r: Nested sequence (per sub-discriminator, per layer) of feature
            tensors produced from the real (target) audio.
        fmap_g: Matching nested sequence produced from the generated audio.

    Returns:
        Scalar tensor (or int 0 for empty input): 2 * the sum of mean
        absolute differences over all layer feature maps.
    """
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            # Stop gradient on real features to save memory/computation;
            # gradients should only flow through the generator path (gl).
            rl = rl.detach()
            loss += torch.mean(torch.abs(rl - gl))
    # Fixed scale factor of 2, as in the HiFi-GAN reference implementation.
    # (Previous comment claimed scaling by the number of feature maps, which
    # is not what the code does.)
    return loss * 2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss over all sub-discriminator outputs.

    Args:
        disc_real_outputs: Per-sub-discriminator score tensors for real audio.
        disc_generated_outputs: Matching score tensors for generated audio.

    Returns:
        Tuple of (total loss tensor, list of per-discriminator real-loss
        floats, list of per-discriminator fake-loss floats). Callers that
        only need the total may discard the lists (``d_loss, _, _ = ...``).
    """
    loss = 0
    r_losses = []
    g_losses = []
    # Iterate over both MPD and MSD outputs.
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        # Real scores should be pushed toward 1.0.
        r_loss = torch.mean((dr - 1) ** 2)
        # Fake scores should be pushed toward 0.0.
        g_loss = torch.mean(dg ** 2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())
    return loss, r_losses, g_losses
@@ -61,16 +57,11 @@ def generator_adv_loss(disc_generated_outputs):
def discriminator_train(
    high_quality,
    low_quality,
    discriminator,
    generator_output
):
    """One discriminator-side loss evaluation (LSGAN).

    The generator output is detached so only the discriminator's parameters
    receive gradients. ``low_quality`` is accepted for interface parity with
    ``generator_train`` but is not used here.
    """
    # Detach first: we are optimizing D only, never G, in this step.
    fake_candidate = generator_output.detach()
    # Ensemble discriminator takes (real target, fake candidate) separately.
    real_scores, fake_scores, _, _ = discriminator(high_quality, fake_candidate)
    # LSGAN objective summed across all sub-discriminators.
    total_loss, _, _ = discriminator_loss(real_scores, fake_scores)
    return total_loss
@@ -83,25 +74,14 @@ def generator_train(
discriminator,
generator_output
):
# 1. Forward pass through Discriminator
# We do NOT detach generator_output here, we need gradients for G
y_d_rs, y_d_gs, fmap_rs, fmap_gs = discriminator(high_quality, generator_output)
# 2. Adversarial Loss (Try to fool D into thinking G is Real)
loss_gen_adv = generator_adv_loss(y_d_gs)
# 3. Feature Matching Loss (Force G to match internal features of D)
loss_fm = feature_matching_loss(fmap_rs, fmap_gs)
# 4. Mel-Spectrogram / STFT Loss (Audio Quality)
stft_loss = stft_loss_fn(high_quality, generator_output)["total"]
# -----------------------------------------
# 5. Combine Losses
# -----------------------------------------
# Recommended weights for HiFi-GAN/EnCodec style architectures:
# STFT is dominant (45), FM provides stability (2), Adv provides texture (1)
lambda_stft = 45.0
lambda_fm = 2.0
lambda_adv = 1.0