:alembic: | Real-time testing...
This commit is contained in:
32
data.py
32
data.py
@ -21,33 +21,25 @@ class AudioDataset(Dataset):
|
||||
def __getitem__(self, idx):
    """Return one (clean, degraded) training pair for sample *idx*.

    Loads the high-quality source file, synthesizes a low-quality
    counterpart by resampling to a randomly chosen lower sample rate,
    normalizes both to a fixed length, and splits each into fixed-size
    chunks moved to ``self.device``.

    Returns:
        ((splitted_high_quality_audio, original_sample_rate),
         (splitted_low_quality_audio, mangled_sample_rate))
        where each ``splitted_*`` is a list of tensors of 128 samples
        (the last chunk is padded up to 128).
    """
    # Load high-quality audio. normalize=True gives float samples in [-1, 1].
    high_quality_audio, original_sample_rate = torchaudio.load(self.input_files[idx], normalize=True)

    # Collapse to mono once, up front, so the degraded copy derived below
    # is already mono as well (avoids converting each tensor separately).
    high_quality_audio = AudioUtils.stereo_tensor_to_mono(high_quality_audio)

    # Generate low-quality audio with random downsampling. The degraded
    # signal is kept at the mangled rate; its true rate is returned so the
    # caller can interpret it.
    mangled_sample_rate = random.choice(self.audio_sample_rates)
    resample_transform_low = torchaudio.transforms.Resample(original_sample_rate, mangled_sample_rate)
    low_quality_audio = resample_transform_low(high_quality_audio)

    high_quality_audio = self._fit_to_max_length(high_quality_audio)
    low_quality_audio = self._fit_to_max_length(low_quality_audio)

    # Split into fixed-size chunks of 128 samples; the final (possibly
    # short) chunk is padded so every element has identical shape.
    splitted_high_quality_audio = self._split_and_move(high_quality_audio)
    splitted_low_quality_audio = self._split_and_move(low_quality_audio)

    return (splitted_high_quality_audio, original_sample_rate), (splitted_low_quality_audio, mangled_sample_rate)

def _fit_to_max_length(self, audio):
    """Pad with trailing zeros or truncate *audio* (channels, samples) to exactly ``self.MAX_LENGTH`` samples."""
    length = audio.shape[1]
    if length < self.MAX_LENGTH:
        padding = self.MAX_LENGTH - length
        audio = F.pad(audio, (0, padding))
    elif length > self.MAX_LENGTH:
        audio = audio[:, :self.MAX_LENGTH]
    return audio

def _split_and_move(self, audio):
    """Split *audio* into 128-sample chunks, pad the last chunk to 128, and move every chunk to ``self.device``."""
    chunks = AudioUtils.split_audio(audio, 128)
    chunks[-1] = AudioUtils.pad_tensor(chunks[-1], 128)
    return [tensor.to(self.device) for tensor in chunks]
|
Reference in New Issue
Block a user