GitHub Repository: y33-j3T/Coursera-Deep-Learning
Path: blob/master/Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py
import numpy as np
import trax
#from trax import layers as tl
#from trax.fastmath import numpy as fastnp
#from trax.supervised import training

# UNIT TEST for UNQ_C1
def test_get_conversation(target):

    data = {'file1.json': {'log': [{'text': 'hi'},
                                   {'text': 'hello'},
                                   {'text': 'nice'}]},
            'file2.json': {'log': [{'text': 'a b'},
                                   {'text': ''},
                                   {'text': 'good '},
                                   {'text': 'no?'}]}}

    res1 = target('file1.json', data)
    res2 = target('file2.json', data)

    expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
    expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'

    success = 0
    fails = 0

    try:
        assert res1 == expected1
        success += 1
    except:
        print('Error in test 1 \nResult : ', res1, '\nExpected: ', expected1)
        fails += 1
    try:
        assert res2 == expected2
        success += 1
    except:
        print('Error in test 2 \nResult : ', res2, '\nExpected: ', expected2)
        fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', fails, " Tests failed")

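# Illustrative sketch (not the graded solution): test_get_conversation above
# expects a function that walks data[file]['log'] and tags alternating messages
# with 'Person 1' / 'Person 2'. The helper name and exact whitespace handling
# below are assumptions; a solution only needs to reproduce the expected
# strings used in the test.
def _example_get_conversation(file, data):
    parts = []
    for i, entry in enumerate(data[file]['log']):
        speaker = 'Person 1' if i % 2 == 0 else 'Person 2'
        parts.append(f'{speaker}: {entry["text"]}'.strip())
    # leading space matches the expected strings in the unit test above
    return ' ' + ' '.join(parts)
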
# UNIT TEST for UNQ_C2
def test_reversible_layer_forward(target):
    f1 = lambda x: x + 2
    g1 = lambda x: x * 3

    f2 = lambda x: x + 1
    g2 = lambda x: x * 2

    input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])

    input_vector2 = np.array([1] * 128)
    expected2 = np.array([3] * 64 + [7] * 64)

    success = 0
    fails = 0
    try:
        res = target(input_vector1, f1, g1)
        assert isinstance(res, np.ndarray)
        success += 1
    except:
        print('Wrong type! Output is not of type np.ndarray')
        fails += 1
    try:
        res = target(input_vector1, f1, g1)
        assert np.allclose(res, expected1)
        success += 1
    except:
        print('Error in test 1 \nResult : ', res, '\nExpected: ', expected1)
        fails += 1
    try:
        res = target(input_vector2, f2, g2)
        assert np.allclose(res, expected2)
        success += 1
    except:
        print('Error in test 2 \nResult : ', res, '\nExpected: ', expected2)
        fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', fails, " Tests failed")

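# Illustrative sketch (not the graded solution): test_reversible_layer_forward
# checks the forward pass of a reversible residual block. Splitting the input
# in half as (x1, x2), the block computes y1 = x1 + f(x2), y2 = x2 + g(y1) and
# concatenates the halves again. The helper name below is an assumption.
def _example_reversible_layer_forward(x, f, g):
    x1, x2 = np.split(x, 2)          # split the input vector into two halves
    y1 = x1 + f(x2)                  # first residual half
    y2 = x2 + g(y1)                  # second residual half uses the new y1
    return np.concatenate([y1, y2])
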
# UNIT TEST for UNQ_C3
def test_reversible_layer_reverse(target):

    f1 = lambda x: x + 2
    g1 = lambda x: x * 3

    f2 = lambda x: x + 1
    g2 = lambda x: x * 2

    input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])

    input_vector2 = np.array([1] * 128)
    expected2 = np.array([1] * 64 + [-1] * 64)

    success = 0
    fails = 0
    try:
        res = target(input_vector1, f1, g1)
        assert isinstance(res, np.ndarray)
        success += 1
    except:
        print('Wrong type! Output is not of type np.ndarray')
        fails += 1
    try:
        res = target(input_vector1, f1, g1)
        assert np.allclose(res, expected1)
        success += 1
    except:
        print('Error in test 1 \nResult : ', res, '\nExpected: ', expected1)
        fails += 1
    try:
        res = target(input_vector2, f2, g2)
        assert np.allclose(res, expected2)
        success += 1
    except:
        print('Error in test 2 \nResult : ', res, '\nExpected: ', expected2)
        fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', fails, " Tests failed")

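# Illustrative sketch (not the graded solution): test_reversible_layer_reverse
# checks the inverse of the forward block above. Given the output split as
# (y1, y2), the inputs are recovered with x2 = y2 - g(y1) and x1 = y1 - f(x2).
# The helper name below is an assumption.
def _example_reversible_layer_reverse(y, f, g):
    y1, y2 = np.split(y, 2)          # split the output vector into two halves
    x2 = y2 - g(y1)                  # undo the second residual half
    x1 = y1 - f(x2)                  # undo the first residual half
    return np.concatenate([x1, x2])
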
# UNIT TEST for UNQ_C4
def test_ReformerLM(target):
    test_cases = [
        {
            "name": "layer_len_check",
            "expected": 11,
            "error": "We found {} layers in your model. It should be 11.\nCheck the reversible decoder blocks before the dense layer"
        },
        {
            "name": "simple_test_check",
            "expected": "Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
            "error": "The ReformerLM is not defined properly."
        }
    ]
    temp_model = target('train')

    success = 0
    fails = 0

    for test_case in test_cases:
        try:
            if test_case['name'] == "simple_test_check":
                assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n', '')
                success += 1
            if test_case['name'] == "layer_len_check":
                if test_case["expected"] == len(temp_model.sublayers):
                    success += 1
                else:
                    print(test_case["error"].format(len(temp_model.sublayers)))
                    fails += 1
        except:
            print(test_case['error'])
            fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', fails, " Tests failed")

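# Illustrative sketch (not the graded solution): test_ReformerLM expects a
# 2-layer trax Reformer language model with d_model=512 and d_ff=2048, matching
# the repr in the "simple_test_check" case above. A thin wrapper around
# trax.models.reformer.ReformerLM along these lines would produce such a model;
# the vocab size and default arguments here are assumptions.
def _example_ReformerLM(mode='train', vocab_size=33000):
    return trax.models.reformer.ReformerLM(
        vocab_size=vocab_size,
        n_layers=2,      # two reversible attention + feed-forward blocks
        d_model=512,
        d_ff=2048,
        mode=mode,
    )
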
# UNIT TEST for UNQ_C5
def test_tasks(train_task, eval_task):
    target = train_task
    success = 0
    fails = 0

    # Test the labeled data parameter for train_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in train_task")

    # Test the cross entropy loss parameter
    try:
        strlabel = str(target._loss_layer)
        assert(strlabel == "CrossEntropyLoss_in3")
        success += 1
    except:
        fails += 1
        print("Wrong loss function. CrossEntropyLoss_in3 was expected")

    # Test the optimizer parameter
    try:
        assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
        success += 1
    except:
        fails += 1
        print("Wrong optimizer")

    # Test the schedule parameter
    try:
        assert(isinstance(target._lr_schedule, trax.supervised.lr_schedules._BodyAndTail))
        success += 1
    except:
        fails += 1
        print("Wrong learning rate schedule type")

    # Test the _n_steps_per_checkpoint parameter
    try:
        assert(target._n_steps_per_checkpoint == 10)
        success += 1
    except:
        fails += 1
        print("Wrong checkpoint step frequency")

    target = eval_task
    # Test the labeled data parameter for eval_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in eval_task")

    # Test the metrics in eval_task
    try:
        strlabel = str(target._metrics).replace(' ', '')
        assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
        success += 1
    except:
        fails += 1
        print(f"Wrong metrics. Found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', fails, " Tests failed")

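# Illustrative sketch (not the graded solution): test_tasks checks objects built
# with trax.supervised.training. The TrainTask should use CrossEntropyLoss, an
# Adam optimizer, a warmup + rsqrt-decay learning-rate schedule (an instance of
# lr_schedules._BodyAndTail), and checkpoints every 10 steps; the EvalTask
# should report CrossEntropyLoss and Accuracy. The stream arguments and
# hyperparameter values below are placeholders.
def _example_tasks(train_stream, eval_stream):
    from trax import layers as tl
    from trax.supervised import training

    train_task = training.TrainTask(
        labeled_data=train_stream,            # generator wrapped with add_loss_weights
        loss_layer=tl.CrossEntropyLoss(),     # reported as CrossEntropyLoss_in3
        optimizer=trax.optimizers.Adam(0.01),
        lr_schedule=trax.supervised.lr_schedules.warmup_and_rsqrt_decay(
            n_warmup_steps=1000, max_value=0.01),
        n_steps_per_checkpoint=10,
    )
    eval_task = training.EvalTask(
        labeled_data=eval_stream,
        metrics=[tl.CrossEntropyLoss(), tl.Accuracy()],
    )
    return train_task, eval_task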