⚗️ | Experimenting with other generator architectures.
parent de72ee31ea
commit 1000692f32

generator.py (26 lines changed)
@@ -1,27 +1,39 @@
 import torch.nn as nn
 
 class SISUGenerator(nn.Module):
-    def __init__(self, upscale_scale=1): # No noise_dim parameter
+    def __init__(self, upscale_scale=1):
         super(SISUGenerator, self).__init__()
         self.layers1 = nn.Sequential(
             nn.Conv1d(2, 128, kernel_size=3, padding=1),
-            # nn.LeakyReLU(0.2, inplace=True),
+            nn.LeakyReLU(0.2, inplace=True), # Activation
+            nn.BatchNorm1d(128), # Batch Norm
             nn.Conv1d(128, 256, kernel_size=3, padding=1),
-            # nn.LeakyReLU(0.2, inplace=True),
+            nn.LeakyReLU(0.2, inplace=True), # Activation
+            nn.BatchNorm1d(256), # Batch Norm
         )
 
         self.layers2 = nn.Sequential(
             nn.Conv1d(256, 128, kernel_size=3, padding=1),
-            # nn.LeakyReLU(0.2, inplace=True),
+            nn.LeakyReLU(0.2, inplace=True), # Activation
+            nn.BatchNorm1d(128), # Batch Norm
             nn.Conv1d(128, 64, kernel_size=3, padding=1),
-            # nn.LeakyReLU(0.2, inplace=True),
-            nn.Conv1d(64, 2, kernel_size=3, padding=1),
-            # nn.Tanh()
+            nn.LeakyReLU(0.2, inplace=True), # Activation
+            nn.BatchNorm1d(64), # Batch Norm
+            nn.Conv1d(64, upscale_scale * 2, kernel_size=3, padding=1), # Output channels scaled
         )
+        self.upscale_factor = upscale_scale
 
+    def pixel_shuffle_1d(self, input, upscale_factor):
+        batch_size, channels, in_width = input.size()
+        out_width = in_width * upscale_factor
+        input_view = input.contiguous().view(batch_size, channels // upscale_factor, upscale_factor, in_width)
+        shuffle_out = input_view.permute(0, 1, 3, 2).contiguous()
+        return shuffle_out.view(batch_size, channels // upscale_factor, out_width)
+
     def forward(self, x, scale):
         x = self.layers1(x)
         upsample = nn.Upsample(scale_factor=scale, mode='nearest')
         x = upsample(x)
         x = self.layers2(x)
+        x = self.pixel_shuffle_1d(x, self.upscale_factor)
         return x
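With this change, forward upsamples twice: once by scale via nearest-neighbour nn.Upsample, and once by upscale_scale via the new pixel_shuffle_1d, which trades the upscale_scale * 2 output channels of layers2 for an upscale_scale-times longer waveform with 2 channels. A minimal shape check of that behaviour, assuming the class above is importable from generator.py (the batch and sample counts are illustrative, not from the commit):

    import torch

    from generator import SISUGenerator  # the module changed in this commit

    model = SISUGenerator(upscale_scale=2).eval()  # eval() so BatchNorm1d uses running stats

    with torch.no_grad():
        x = torch.randn(4, 2, 1024)   # (batch, 2 audio channels, samples) -- illustrative sizes
        y = model(x, scale=2)         # nearest-neighbour upsample by 2 inside forward

    # layers2 emits upscale_scale * 2 = 4 channels; pixel_shuffle_1d folds the
    # extra factor of 2 into the time axis: width = 1024 * 2 (Upsample) * 2 (shuffle).
    print(y.shape)  # torch.Size([4, 2, 4096])

One side note on the unchanged lines: nn.Upsample is constructed anew on every forward call; torch.nn.functional.interpolate(x, scale_factor=scale, mode='nearest') is the more idiomatic equivalent and avoids the per-call module allocation.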