# TTS/tests/layers_tests.py

import unittest
import torch as T
from layers.tacotron import Prenet, CBHG, Decoder, Encoder
from layers.losses import L1LossMasked
from utils.generic_utils import sequence_mask
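
# Optional (not in the original file): seed PyTorch so the random dummy inputs
# below are reproducible across runs; the shape and range assertions do not
# depend on the actual values, so this is purely a convenience.
T.manual_seed(0)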


class PrenetTests(unittest.TestCase):
    def test_in_out(self):
        # Prenet with two fully connected layers: 128 -> 256 -> 128.
        layer = Prenet(128, out_features=[256, 128])
        dummy_input = T.rand(4, 128)

        print(layer)
        output = layer(dummy_input)
        # Batch size is preserved; the last out_features entry sets the output width.
        assert output.shape[0] == 4
        assert output.shape[1] == 128


class CBHGTests(unittest.TestCase):
    def test_in_out(self):
        layer = CBHG(
            128,
            K=8,
            conv_bank_features=80,
            conv_projections=[160, 128],
            highway_features=80,
            gru_features=80,
            num_highways=4)
        # (batch, time, features)
        dummy_input = T.rand(4, 8, 128)

        print(layer)
        output = layer(dummy_input)
        assert output.shape[0] == 4
        assert output.shape[1] == 8
        assert output.shape[2] == 160  # 2 * gru_features from the bidirectional GRU


class DecoderTests(unittest.TestCase):
    def test_in_out(self):
        layer = Decoder(
            in_features=256,
            memory_dim=80,
            r=2,
            memory_size=4,
            attn_windowing=False)
        # Encoder outputs (batch, time, features) and decoder memory frames
        # (batch, frames, memory_dim).
        dummy_input = T.rand(4, 8, 256)
        dummy_memory = T.rand(4, 2, 80)

        output, alignment, stop_tokens = layer(dummy_input, dummy_memory, mask=None)

        assert output.shape[0] == 4
        assert output.shape[1] == 1, "size not {}".format(output.shape[1])
        assert output.shape[2] == 80 * 2, "size not {}".format(output.shape[2])
        assert stop_tokens.shape[0] == 4
        # Stop token predictions must stay within [0, 1].
        assert stop_tokens.max() <= 1.0
        assert stop_tokens.min() >= 0


class EncoderTests(unittest.TestCase):
    def test_in_out(self):
        layer = Encoder(128)
        dummy_input = T.rand(4, 8, 128)

        print(layer)
        output = layer(dummy_input)
        print(output.shape)
        assert output.shape[0] == 4
        assert output.shape[1] == 8
        assert output.shape[2] == 256  # 128 * 2 BiRNN


class L1LossMaskedTests(unittest.TestCase):
    def test_in_out(self):
        layer = L1LossMasked()

        # Identical input and target -> zero loss.
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.ones(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 0.0

        # Constant difference of 1 everywhere -> mean L1 loss of 1.
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())

        # Corrupt the padded positions; the masked loss must ignore them.
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = T.arange(5, 9).long()
        mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())
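

# Optional entry point (an addition, not from the upstream file): lets the
# module be run directly with `python layers_tests.py`, in addition to the
# usual unittest/nose test discovery.
if __name__ == "__main__":
    unittest.main()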