17 Commits

Author SHA1 Message Date
782a3bab28 ⚗️ | More architectural changes 2025-11-18 21:34:59 +02:00
3f23242d6f ⚗️ | Added some stupid ways for training + some makeup 2025-10-04 22:38:11 +03:00
0bc8fc2792 | Made training bit... spicier. 2025-09-10 19:52:53 +03:00
ff38cefdd3 🐛 | Fix loading wrong model. 2025-06-08 18:14:31 +03:00
03fdc050cc | Made training bit faster. 2025-06-07 20:43:52 +03:00
2ded03713d | Added app.py script so the model can be used. 2025-06-06 22:10:06 +03:00
a135c765da 🐛 | Misc fixes... 2025-05-05 00:50:56 +03:00
b1e18443ba | Added support for .mp3 and .flac loading... 2025-05-04 23:56:14 +03:00
660b41aef8 :albemic: | Real-time testing... 2025-05-04 22:48:57 +03:00
d70c86c257 | Implemented MFCC and STFT. 2025-04-26 17:03:28 +03:00
c04b072de6 | Added smarter ways that would've been needed from the begining. 2025-04-16 17:08:13 +03:00
b6d16e4f11 ♻️ | Restructured procject code. 2025-04-14 17:51:34 +03:00
nsiltala
3936b6c160 🐛 | Fixed NVIDIA training... again. 2025-04-07 14:49:07 +03:00
fbcd5803b8 🐛 | Fixed training on CPU and NVIDIA hardware. 2025-04-07 02:14:06 +03:00
9394bc6c5a :albemic: | Fat architecture. Hopefully better results. 2025-04-06 00:05:43 +03:00
f928d8c2cf :albemic: | More tests. 2025-03-25 21:51:29 +02:00
54338e55a9 :albemic: | Tests. 2025-03-25 19:50:51 +02:00
12 changed files with 705 additions and 273 deletions

AudioUtils.py
View File

@@ -1,18 +1,41 @@
import torch
import torch.nn.functional as F


def stereo_tensor_to_mono(waveform: torch.Tensor) -> torch.Tensor:
    mono_tensor = torch.mean(waveform, dim=0, keepdim=True)
    return mono_tensor


def pad_tensor(audio_tensor: torch.Tensor, target_length: int = 512) -> torch.Tensor:
    padding_amount = target_length - audio_tensor.size(-1)
    if padding_amount <= 0:
        return audio_tensor
    padded_audio_tensor = F.pad(audio_tensor, (0, padding_amount))
    return padded_audio_tensor


def split_audio(audio_tensor: torch.Tensor, chunk_size: int = 512, pad_last_tensor: bool = False) -> list[torch.Tensor]:
    chunks = list(torch.split(audio_tensor, chunk_size, dim=1))
    if pad_last_tensor:
        last_chunk = chunks[-1]
        if last_chunk.size(-1) < chunk_size:
            chunks[-1] = pad_tensor(last_chunk, chunk_size)
    return chunks


def reconstruct_audio(chunks: list[torch.Tensor]) -> torch.Tensor:
    reconstructed_tensor = torch.cat(chunks, dim=-1)
    return reconstructed_tensor


def normalize(audio_tensor: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    max_val = torch.max(torch.abs(audio_tensor))
    if max_val < eps:
        return audio_tensor
    return audio_tensor / max_val
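A quick sketch of how these helpers compose (the file path here is a placeholder; the 512-sample chunk size matches the defaults above):

import torchcodec

import AudioUtils

# Decode a file, normalize it, chunk it, then stitch the chunks back together.
decoder = torchcodec.decoders.AudioDecoder("example.wav")
audio = AudioUtils.normalize(decoder.get_all_samples().data)
chunks = AudioUtils.split_audio(audio, chunk_size=512, pad_last_tensor=True)
restored = AudioUtils.reconstruct_audio(chunks)
# restored matches audio except for the zero-padding appended to the final chunk.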

README.md
View File

@@ -18,6 +18,7 @@ SISU (Super Ingenious Sound Upscaler) is a project that uses GANs (Generative Ad
1. **Set Up**:
   - Make sure you have Python installed (version 3.8 or higher).
   - Install needed packages: `pip install -r requirements.txt`
   - Install the current version of PyTorch (CUDA/ROCm/whatever your device supports)
2. **Prepare Audio Data**:
   - Put your audio files in the `dataset/good` folder.

0
__init__.py Normal file
View File

128
app.py Normal file
View File

@@ -0,0 +1,128 @@
import argparse

import torch
import torchaudio
import torchcodec
import tqdm
from accelerate import Accelerator

import AudioUtils
from generator import SISUGenerator

# Init script argument parser
parser = argparse.ArgumentParser(description="Training script")
parser.add_argument("--device", type=str, default="cpu", help="Select device")
parser.add_argument("--model", type=str, help="Model to use for upscaling")
parser.add_argument(
    "--clip_length",
    type=int,
    default=8000,
    help="Internal clip length, leave unspecified if unsure",
)
parser.add_argument(
    "--sample_rate", type=int, default=44100, help="Output clip sample rate"
)
parser.add_argument(
    "--bitrate",
    type=int,
    default=192000,
    help="Output clip bitrate",
)
parser.add_argument("-i", "--input", type=str, help="Input audio file")
parser.add_argument("-o", "--output", type=str, help="Output audio file")
args = parser.parse_args()

if args.sample_rate < 8000:
    print(
        "Sample rate cannot be lower than 8000! (44100 is recommended for base models)"
    )
    exit()

# ---------------------------
# Init accelerator
# ---------------------------
accelerator = Accelerator(mixed_precision="bf16")

# ---------------------------
# Models
# ---------------------------
generator = SISUGenerator()

accelerator.print("🔨 | Compiling models...")
generator = torch.compile(generator)
accelerator.print("✅ | Compiling done!")

# ---------------------------
# Prepare accelerator
# ---------------------------
generator = accelerator.prepare(generator)

# ---------------------------
# Checkpoint helpers
# ---------------------------
models_dir = args.model
clip_length = args.clip_length
input_audio = args.input
output_audio = args.output

if models_dir:
    ckpt = torch.load(models_dir)
    accelerator.unwrap_model(generator).load_state_dict(ckpt["G"])
    accelerator.print("💾 | Loaded model!")
else:
    print(
        "Generator model (--model) isn't specified. Do you have the trained model? If not, you need to train it OR acquire it from somewhere (DON'T ASK ME, YET!)"
    )


def start():
    # To Mono!
    decoder = torchcodec.decoders.AudioDecoder(input_audio)
    decoded_samples = decoder.get_all_samples()

    audio = decoded_samples.data
    original_sample_rate = decoded_samples.sample_rate

    # Support for multichannel audio
    # audio = AudioUtils.stereo_tensor_to_mono(audio)
    audio = AudioUtils.normalize(audio)

    resample_transform = torchaudio.transforms.Resample(
        original_sample_rate, args.sample_rate
    )
    audio = resample_transform(audio)

    splitted_audio = AudioUtils.split_audio(audio, clip_length)
    splitted_audio_on_device = [t.view(1, t.shape[0], t.shape[-1]).to(accelerator.device) for t in splitted_audio]
    processed_audio = []

    with torch.no_grad():
        for clip in tqdm.tqdm(splitted_audio_on_device, desc="Processing..."):
            channels = []
            for audio_channel in torch.split(clip, 1, dim=1):
                output_piece = generator(audio_channel)
                channels.append(output_piece.detach().cpu())
            output_clip = torch.cat(channels, dim=1)
            processed_audio.append(output_clip)

    reconstructed_audio = AudioUtils.reconstruct_audio(processed_audio)
    reconstructed_audio = reconstructed_audio.squeeze(0)

    print(f"🔊 | Saving {output_audio}!")
    torchaudio.save_with_torchcodec(
        uri=output_audio,
        src=reconstructed_audio,
        sample_rate=args.sample_rate,
        channels_first=True,
        compression=args.bitrate,
    )


start()
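Assuming a checkpoint saved by the training script, invoking the upscaler would look something like this (the paths and values are placeholders, not files shipped with the repo):

python app.py --model models/last.pt -i song_lowres.mp3 -o song_upscaled.flac --sample_rate 44100 --bitrate 192000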

98
data.py
View File

@@ -1,53 +1,71 @@
import os
import random

import torch
import torchaudio
import torchcodec.decoders as decoders
import tqdm
from torch.utils.data import Dataset

import AudioUtils


class AudioDataset(Dataset):
    audio_sample_rates = [8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100]

    def __init__(self, input_dir, clip_length: int = 512, normalize: bool = True):
        self.clip_length = clip_length
        self.normalize = normalize

        input_files = [
            os.path.join(input_dir, f)
            for f in os.listdir(input_dir)
            if os.path.isfile(os.path.join(input_dir, f))
            and f.lower().endswith((".wav", ".mp3", ".flac"))
        ]

        data = []
        for audio_clip in tqdm.tqdm(
            input_files, desc=f"Processing {len(input_files)} audio file(s)"
        ):
            decoder = decoders.AudioDecoder(audio_clip)
            decoded_samples = decoder.get_all_samples()
            audio = decoded_samples.data.float()
            original_sample_rate = decoded_samples.sample_rate

            if normalize:
                audio = AudioUtils.normalize(audio)

            splitted_high_quality_audio = AudioUtils.split_audio(audio, clip_length, True)
            if not splitted_high_quality_audio:
                continue

            for splitted_audio_clip in splitted_high_quality_audio:
                for audio_clip in torch.split(splitted_audio_clip, 1):
                    data.append((audio_clip, original_sample_rate))

        self.audio_data = data

    def __len__(self):
        return len(self.audio_data)

    def __getitem__(self, idx):
        audio_clip = self.audio_data[idx]

        mangled_sample_rate = random.choice(self.audio_sample_rates)

        resample_transform_low = torchaudio.transforms.Resample(
            audio_clip[1], mangled_sample_rate
        )
        resample_transform_high = torchaudio.transforms.Resample(
            mangled_sample_rate, audio_clip[1]
        )

        low_audio_clip = resample_transform_high(resample_transform_low(audio_clip[0]))

        if audio_clip[0].shape[1] < low_audio_clip.shape[1]:
            low_audio_clip = low_audio_clip[:, :audio_clip[0].shape[1]]
        elif audio_clip[0].shape[1] > low_audio_clip.shape[1]:
            low_audio_clip = AudioUtils.pad_tensor(low_audio_clip, self.clip_length)

        return ((audio_clip[0], low_audio_clip), (audio_clip[1], mangled_sample_rate))
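A minimal sketch of how the dataset is consumed (the `./dataset` path and 8192-sample clip length mirror the training script; the batch size is arbitrary):

from torch.utils.data import DataLoader

from data import AudioDataset

dataset = AudioDataset("./dataset", clip_length=8192)
loader = DataLoader(dataset, batch_size=8, shuffle=True)

# Each item pairs the original clip with a resample-degraded copy,
# plus the original and "mangled" sample rates.
(high_quality, low_quality), (orig_sr, mangled_sr) = next(iter(loader))
print(high_quality.shape, low_quality.shape)  # (8, 1, 8192) each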

discriminator.py
View File

@@ -1,18 +1,31 @@
import torch
import torch.nn as nn
import torch.nn.utils as utils


def discriminator_block(
    in_channels,
    out_channels,
    kernel_size=15,
    stride=1,
    dilation=1
):
    padding = dilation * (kernel_size - 1) // 2
    conv_layer = nn.Conv1d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        dilation=dilation,
        padding=padding
    )
    conv_layer = utils.spectral_norm(conv_layer)
    leaky_relu = nn.LeakyReLU(0.2)
    return nn.Sequential(conv_layer, leaky_relu)


class AttentionBlock(nn.Module):
    def __init__(self, channels):
        super(AttentionBlock, self).__init__()
@@ -20,36 +33,38 @@ class AttentionBlock(nn.Module):
            nn.Conv1d(channels, channels // 4, kernel_size=1),
            nn.ReLU(),
            nn.Conv1d(channels // 4, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        attention_weights = self.attention(x)
        return x + (x * attention_weights)


class SISUDiscriminator(nn.Module):
    def __init__(self, layers=8):
        super(SISUDiscriminator, self).__init__()
        self.discriminator_blocks = nn.Sequential(
            # 1 -> 32
            discriminator_block(2, layers),
            AttentionBlock(layers),
            # 32 -> 64
            discriminator_block(layers, layers * 2, dilation=2),
            # 64 -> 128
            discriminator_block(layers * 2, layers * 4, dilation=4),
            AttentionBlock(layers * 4),
            # 128 -> 256
            discriminator_block(layers * 4, layers * 8, stride=4),
            # 256 -> 512
            # discriminator_block(layers * 8, layers * 16, stride=4)
        )

        self.final_conv = nn.Conv1d(layers * 8, 1, kernel_size=3, padding=1)
        self.avg_pool = nn.AdaptiveAvgPool1d(1)

    def forward(self, x):
        x = self.discriminator_blocks(x)
        x = self.final_conv(x)
        x = self.avg_pool(x)
        return x.squeeze(2)
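A quick shape sanity check for the new pair-conditioned input (the 2-channel input and 8192-sample length follow the training setup; the batch size is arbitrary):

import torch

from discriminator import SISUDiscriminator

disc = SISUDiscriminator()
pair = torch.randn(4, 2, 8192)  # channel 0: low-quality clip, channel 1: real or generated clip
score = disc(pair)
print(score.shape)              # torch.Size([4, 1]) -- one realness score per pair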

generator.py
View File

@@ -1,30 +1,46 @@
import torch
import torch.nn as nn


def GeneratorBlock(in_channels, out_channels, kernel_size=3, stride=1, dilation=1):
    padding = (kernel_size - 1) // 2 * dilation
    return nn.Sequential(
        nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding
        ),
        nn.InstanceNorm1d(out_channels),
        nn.PReLU(num_parameters=1, init=0.1),
    )


class AttentionBlock(nn.Module):
    def __init__(self, channels):
        super(AttentionBlock, self).__init__()
        self.attention = nn.Sequential(
            nn.Conv1d(channels, channels // 4, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels // 4, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        attention_weights = self.attention(x)
        return x + (x * attention_weights)


class ResidualInResidualBlock(nn.Module):
    def __init__(self, channels, num_convs=3):
        super(ResidualInResidualBlock, self).__init__()
        self.conv_layers = nn.Sequential(
            *[GeneratorBlock(channels, channels) for _ in range(num_convs)]
        )
        self.attention = AttentionBlock(channels)

    def forward(self, x):
@@ -33,20 +49,74 @@ class ResidualInResidualBlock(nn.Module):
        x = self.attention(x)
        return x + residual


def UpsampleBlock(in_channels, out_channels):
    return nn.Sequential(
        nn.ConvTranspose1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=4,
            stride=2,
            padding=1
        ),
        nn.InstanceNorm1d(out_channels),
        nn.PReLU(num_parameters=1, init=0.1)
    )


class SISUGenerator(nn.Module):
    def __init__(self, channels=32, num_rirb=1):
        super(SISUGenerator, self).__init__()

        self.first_conv = GeneratorBlock(1, channels)

        self.downsample = GeneratorBlock(channels, channels * 2, stride=2)
        self.downsample_attn = AttentionBlock(channels * 2)
        self.downsample_2 = GeneratorBlock(channels * 2, channels * 4, stride=2)
        self.downsample_2_attn = AttentionBlock(channels * 4)

        self.rirb = ResidualInResidualBlock(channels * 4)
        # self.rirb = nn.Sequential(
        #     *[ResidualInResidualBlock(channels * 4) for _ in range(num_rirb)]
        # )

        self.upsample = UpsampleBlock(channels * 4, channels * 2)
        self.upsample_attn = AttentionBlock(channels * 2)
        self.compress_1 = GeneratorBlock(channels * 4, channels * 2)

        self.upsample_2 = UpsampleBlock(channels * 2, channels)
        self.upsample_2_attn = AttentionBlock(channels)
        self.compress_2 = GeneratorBlock(channels * 2, channels)

        self.final_conv = nn.Sequential(
            nn.Conv1d(channels, 1, kernel_size=7, padding=3),
            nn.Tanh()
        )

    def forward(self, x):
        residual_input = x
        x1 = self.first_conv(x)

        x2 = self.downsample(x1)
        x2 = self.downsample_attn(x2)
        x3 = self.downsample_2(x2)
        x3 = self.downsample_2_attn(x3)

        x_rirb = self.rirb(x3)

        up1 = self.upsample(x_rirb)
        up1 = self.upsample_attn(up1)
        cat1 = torch.cat((up1, x2), dim=1)
        comp1 = self.compress_1(cat1)

        up2 = self.upsample_2(comp1)
        up2 = self.upsample_2_attn(up2)
        cat2 = torch.cat((up2, x1), dim=1)
        comp2 = self.compress_2(cat2)

        learned_residual = self.final_conv(comp2)
        output = residual_input + learned_residual

        return output
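A similar sketch for the generator; because of the two stride-2 downsamples and matching upsamples, the clip length should be divisible by 4 so the skip concatenations line up (8192, the training clip length, is):

import torch

from generator import SISUGenerator

gen = SISUGenerator()
clip = torch.randn(2, 1, 8192)  # mono clips, length divisible by 4
out = gen(clip)                 # residual output: input + learned correction
print(out.shape)                # torch.Size([2, 1, 8192])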

requirements.txt
View File

@@ -1,14 +0,0 @@
filelock==3.16.1
fsspec==2024.10.0
Jinja2==3.1.4
MarkupSafe==2.1.5
mpmath==1.3.0
networkx==3.4.2
numpy==2.2.3
pytorch-triton-rocm==3.2.0+git4b3bb1f8
setuptools==70.2.0
sympy==1.13.3
torch==2.7.0.dev20250226+rocm6.3
torchaudio==2.6.0.dev20250226+rocm6.3
tqdm==4.67.1
typing_extensions==4.12.2

View File

@@ -1,189 +1,254 @@
import argparse
import datetime
import os

import torch
import torch.nn as nn
import torch.optim as optim
import tqdm
from accelerate import Accelerator
from torch.utils.data import DataLoader, DistributedSampler

from data import AudioDataset
from discriminator import SISUDiscriminator
from generator import SISUGenerator
from utils.TrainingTools import discriminator_train, generator_train

# ---------------------------
# Argument parsing
# ---------------------------
parser = argparse.ArgumentParser(description="Training script (safer defaults)")
parser.add_argument("--resume", action="store_true", help="Resume training")
parser.add_argument(
    "--epochs", type=int, default=5000, help="Number of training epochs"
)
parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
parser.add_argument("--num_workers", type=int, default=2, help="DataLoader num_workers")
parser.add_argument("--debug", action="store_true", help="Print debug logs")
parser.add_argument(
    "--no_pin_memory", action="store_true", help="Disable pin_memory even on CUDA"
)
args = parser.parse_args()

# ---------------------------
# Init accelerator
# ---------------------------
accelerator = Accelerator(mixed_precision="bf16")

# ---------------------------
# Models
# ---------------------------
generator = SISUGenerator()
discriminator = SISUDiscriminator()

accelerator.print("🔨 | Compiling models...")
generator = torch.compile(generator)
discriminator = torch.compile(discriminator)
accelerator.print("✅ | Compiling done!")

# ---------------------------
# Dataset / DataLoader
# ---------------------------
accelerator.print("📊 | Fetching dataset...")
dataset = AudioDataset("./dataset", 8192)

sampler = DistributedSampler(dataset) if accelerator.num_processes > 1 else None
pin_memory = torch.cuda.is_available() and not args.no_pin_memory

train_loader = DataLoader(
    dataset,
    sampler=sampler,
    batch_size=args.batch_size,
    shuffle=(sampler is None),
    num_workers=args.num_workers,
    pin_memory=pin_memory,
    persistent_workers=pin_memory,
)

if not train_loader or not train_loader.batch_size or train_loader.batch_size == 0:
    accelerator.print("🪹 | There is no data to train with! Exiting...")
    exit()

loader_batch_size = train_loader.batch_size

accelerator.print("✅ | Dataset fetched!")

# ---------------------------
# Losses / Optimizers / Scalers
# ---------------------------
optimizer_g = optim.AdamW(
    generator.parameters(), lr=0.0003, betas=(0.5, 0.999), weight_decay=0.0001
)
optimizer_d = optim.AdamW(
    discriminator.parameters(), lr=0.0003, betas=(0.5, 0.999), weight_decay=0.0001
)

scheduler_g = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer_g, mode="min", factor=0.5, patience=5
)
scheduler_d = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer_d, mode="min", factor=0.5, patience=5
)

criterion_d = nn.MSELoss()

# ---------------------------
# Prepare accelerator
# ---------------------------
generator, discriminator, optimizer_g, optimizer_d, train_loader = accelerator.prepare(
    generator, discriminator, optimizer_g, optimizer_d, train_loader
)

# ---------------------------
# Checkpoint helpers
# ---------------------------
models_dir = "./models"
os.makedirs(models_dir, exist_ok=True)


def save_ckpt(path, epoch):
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        accelerator.save(
            {
                "epoch": epoch,
                "G": accelerator.unwrap_model(generator).state_dict(),
                "D": accelerator.unwrap_model(discriminator).state_dict(),
                "optG": optimizer_g.state_dict(),
                "optD": optimizer_d.state_dict(),
                "schedG": scheduler_g.state_dict(),
                "schedD": scheduler_d.state_dict(),
            },
            path,
        )


start_epoch = 0
if args.resume:
    ckpt_path = os.path.join(models_dir, "last.pt")
    ckpt = torch.load(ckpt_path)
    accelerator.unwrap_model(generator).load_state_dict(ckpt["G"])
    accelerator.unwrap_model(discriminator).load_state_dict(ckpt["D"])
    optimizer_g.load_state_dict(ckpt["optG"])
    optimizer_d.load_state_dict(ckpt["optD"])
    scheduler_g.load_state_dict(ckpt["schedG"])
    scheduler_d.load_state_dict(ckpt["schedD"])
    start_epoch = ckpt.get("epoch", 1)
    accelerator.print(f"🔁 | Resumed from epoch {start_epoch}!")

real_buf = torch.full((loader_batch_size, 1), 1, device=accelerator.device, dtype=torch.float32)
fake_buf = torch.zeros((loader_batch_size, 1), device=accelerator.device, dtype=torch.float32)

accelerator.print("🏋️ | Started training...")

try:
    for epoch in range(start_epoch, args.epochs):
        generator.train()
        discriminator.train()

        discriminator_time = 0
        generator_time = 0

        running_d, running_g, steps = 0.0, 0.0, 0

        progress_bar = tqdm.tqdm(train_loader, desc=f"Epoch {epoch} | D {discriminator_time}μs | G {generator_time}μs")

        for i, (
            (high_quality, low_quality),
            (high_sample_rate, low_sample_rate),
        ) in enumerate(progress_bar):
            batch_size = high_quality.size(0)
            real_labels = real_buf[:batch_size].to(accelerator.device)
            fake_labels = fake_buf[:batch_size].to(accelerator.device)

            with accelerator.autocast():
                generator_output = generator(low_quality)

            # --- Discriminator ---
            d_time = datetime.datetime.now()
            optimizer_d.zero_grad(set_to_none=True)
            with accelerator.autocast():
                d_loss = discriminator_train(
                    high_quality,
                    low_quality.detach(),
                    real_labels,
                    fake_labels,
                    discriminator,
                    criterion_d,
                    generator_output.detach()
                )

            accelerator.backward(d_loss)
            optimizer_d.step()
            discriminator_time = (datetime.datetime.now() - d_time).microseconds

            # --- Generator ---
            g_time = datetime.datetime.now()
            optimizer_g.zero_grad(set_to_none=True)
            with accelerator.autocast():
                g_total, g_adv = generator_train(
                    low_quality,
                    high_quality,
                    real_labels,
                    generator,
                    discriminator,
                    criterion_d,
                    generator_output
                )

            accelerator.backward(g_total)
            torch.nn.utils.clip_grad_norm_(generator.parameters(), 1)
            optimizer_g.step()
            generator_time = (datetime.datetime.now() - g_time).microseconds

            d_val = accelerator.gather(d_loss.detach()).mean()
            g_val = accelerator.gather(g_total.detach()).mean()

            if torch.isfinite(d_val):
                running_d += d_val.item()
            else:
                accelerator.print(
                    f"🫥 | NaN in discriminator loss at step {i}, skipping update."
                )

            if torch.isfinite(g_val):
                running_g += g_val.item()
            else:
                accelerator.print(
                    f"🫥 | NaN in generator loss at step {i}, skipping update."
                )

            steps += 1
            progress_bar.set_description(f"Epoch {epoch} | D {discriminator_time}μs | G {generator_time}μs")

        # epoch averages & schedulers
        if steps == 0:
            accelerator.print("🪹 | No steps in epoch (empty dataloader?). Exiting.")
            break

        mean_d = running_d / steps
        mean_g = running_g / steps

        scheduler_d.step(mean_d)
        scheduler_g.step(mean_g)

        save_ckpt(os.path.join(models_dir, "last.pt"), epoch)
        accelerator.print(f"🤝 | Epoch {epoch} done | D {mean_d:.4f} | G {mean_g:.4f}")

except Exception:
    try:
        save_ckpt(os.path.join(models_dir, "crash_last.pt"), epoch)
        accelerator.print(f"💾 | Saved crash checkpoint for epoch {epoch}")
    except Exception as e:
        accelerator.print("😬 | Failed saving crash checkpoint:", e)
    raise

accelerator.print("🏁 | Training finished.")

utils/MultiResolutionSTFTLoss.py Normal file
View File

@@ -0,0 +1,68 @@
from typing import Dict, List

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio.transforms as T


class MultiResolutionSTFTLoss(nn.Module):
    def __init__(
        self,
        fft_sizes: List[int] = [512, 1024, 2048, 4096, 8192],
        hop_sizes: List[int] = [64, 128, 256, 512, 1024],
        win_lengths: List[int] = [256, 512, 1024, 2048, 4096],
        eps: float = 1e-7,
        center: bool = True
    ):
        super().__init__()
        self.eps = eps
        self.n_resolutions = len(fft_sizes)

        self.stft_transforms = nn.ModuleList()
        for i, (n_fft, hop_len, win_len) in enumerate(zip(fft_sizes, hop_sizes, win_lengths)):
            stft = T.Spectrogram(
                n_fft=n_fft,
                hop_length=hop_len,
                win_length=win_len,
                window_fn=torch.hann_window,
                power=None,
                center=center,
                pad_mode="reflect",
                normalized=False,
            )
            self.stft_transforms.append(stft)

    def forward(
        self, y_true: torch.Tensor, y_pred: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        if y_true.dim() == 3 and y_true.size(1) == 1:
            y_true = y_true.squeeze(1)
        if y_pred.dim() == 3 and y_pred.size(1) == 1:
            y_pred = y_pred.squeeze(1)

        sc_loss = 0.0
        mag_loss = 0.0

        for stft in self.stft_transforms:
            stft.window = stft.window.to(y_true.device)

            stft_true = stft(y_true)
            stft_pred = stft(y_pred)

            stft_mag_true = torch.abs(stft_true)
            stft_mag_pred = torch.abs(stft_pred)

            norm_true = torch.linalg.norm(stft_mag_true, dim=(-2, -1))
            norm_diff = torch.linalg.norm(stft_mag_true - stft_mag_pred, dim=(-2, -1))
            sc_loss += torch.mean(norm_diff / (norm_true + self.eps))

            log_mag_pred = torch.log(stft_mag_pred + self.eps)
            log_mag_true = torch.log(stft_mag_true + self.eps)
            mag_loss += F.l1_loss(log_mag_pred, log_mag_true)

        sc_loss /= self.n_resolutions
        mag_loss /= self.n_resolutions

        total_loss = sc_loss + mag_loss
        return {"total": total_loss, "sc": sc_loss, "mag": mag_loss}

58
utils/TrainingTools.py Normal file
View File

@@ -0,0 +1,58 @@
import torch

from utils.MultiResolutionSTFTLoss import MultiResolutionSTFTLoss

# stft_loss_fn = MultiResolutionSTFTLoss(
#     fft_sizes=[512, 1024, 2048, 4096],
#     hop_sizes=[128, 256, 512, 1024],
#     win_lengths=[512, 1024, 2048, 4096]
# )

stft_loss_fn = MultiResolutionSTFTLoss(
    fft_sizes=[512, 1024, 2048],
    hop_sizes=[64, 128, 256],
    win_lengths=[256, 512, 1024]
)


def signal_mae(input_one: torch.Tensor, input_two: torch.Tensor) -> torch.Tensor:
    absolute_difference = torch.abs(input_one - input_two)
    return torch.mean(absolute_difference)


def discriminator_train(
    high_quality,
    low_quality,
    high_labels,
    low_labels,
    discriminator,
    criterion,
    generator_output
):
    real_pair = torch.cat((low_quality, high_quality), dim=1)
    decision_real = discriminator(real_pair)
    d_loss_real = criterion(decision_real, high_labels)

    fake_pair = torch.cat((low_quality, generator_output), dim=1)
    decision_fake = discriminator(fake_pair)
    d_loss_fake = criterion(decision_fake, low_labels)

    d_loss = (d_loss_real + d_loss_fake) / 2.0
    return d_loss


def generator_train(
    low_quality, high_quality, real_labels, generator, discriminator, adv_criterion, generator_output
):
    fake_pair = torch.cat((low_quality, generator_output), dim=1)
    discriminator_decision = discriminator(fake_pair)
    adversarial_loss = adv_criterion(discriminator_decision, real_labels)

    mae_loss = signal_mae(generator_output, high_quality)
    stft_loss = stft_loss_fn(high_quality, generator_output)["total"]

    lambda_mae = 10.0
    lambda_stft = 2.5
    lambda_adv = 2.5

    combined_loss = (lambda_mae * mae_loss) + (lambda_stft * stft_loss) + (lambda_adv * adversarial_loss)

    return combined_loss, adversarial_loss
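A rough sketch of how these helpers are wired up for one step, mirroring the training loop above with dummy tensors. Note that `generator_train` only consumes the precomputed `generator_output`, so the `generator` argument is passed as `None` here purely to illustrate the signature:

import torch
import torch.nn as nn

from discriminator import SISUDiscriminator
from utils.TrainingTools import discriminator_train, generator_train

disc = SISUDiscriminator()
criterion = nn.MSELoss()

high = torch.randn(4, 1, 8192)
low = torch.randn(4, 1, 8192)
fake = torch.randn(4, 1, 8192)   # stand-in for generator(low)
real_labels = torch.ones(4, 1)
fake_labels = torch.zeros(4, 1)

d_loss = discriminator_train(high, low, real_labels, fake_labels, disc, criterion, fake)
g_loss, adv_loss = generator_train(low, high, real_labels, None, disc, criterion, fake)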

0
utils/__init__.py Normal file
View File