CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
amanchadha

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.

GitHub Repository: amanchadha/coursera-deep-learning-specialization
Path: blob/master/C5 - Sequence Models/Week 3/Machine Translation/generateTestCases.py
Views: 4819
1
# New Generate Test Cases
2
import numpy as np
3
import math
4
import os,sys
5
from testCase import get_testCase
6
# import copy
7
# from keras.callbacks import History
8
# import tensorflow as tf
9
sys.path.append('../')
10
sys.path.append('../../')
11
12
from grader_support import stdout_redirector
13
from grader_support import util
14
15
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
16
17
# This grader is for the Neural Machine Translation with Attention assignment
18
19
# Learner-submitted files that this grader evaluates.
mFiles = ["one_step_attention.py", "model.py"]
23
class suppress_stdout_stderr(object):
    """Context manager that deeply suppresses stdout and stderr.

    Works at the file-descriptor level, so it silences output even when it
    originates in compiled C/Fortran extensions (a plain ``print``
    redirection would miss those). Raised exceptions are NOT suppressed:
    their tracebacks go to stderr only after the manager has exited and
    the real descriptors have been restored.
    """

    def __init__(self):
        # Two handles on the null device: replacements for fds 1 and 2.
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
        # Duplicates of the real stdout (1) and stderr (2) for restoration.
        self.save_fds = [os.dup(1), os.dup(2)]

    def __enter__(self):
        # Point stdout (1) and stderr (2) at the null device.
        for null_fd, target in zip(self.null_fds, (1, 2)):
            os.dup2(null_fd, target)

    def __exit__(self, *_):
        # Re-attach the saved real descriptors to fds 1 and 2 ...
        for saved_fd, target in zip(self.save_fds, (1, 2)):
            os.dup2(saved_fd, target)
        # ... then release every descriptor this manager created.
        for fd in self.null_fds + self.save_fds:
            os.close(fd)
51
# Seed NumPy so the generated reference outputs are reproducible run-to-run.
np.random.seed(3)

# Import the reference solution quietly: Keras/TensorFlow print noisy banner
# text at import time that would pollute the grader's output.
with suppress_stdout_stderr():
    from solutions import *
    # NOTE(review): the duplicate `from testCase import get_testCase` that
    # used to sit here was removed — it is already imported at the top of
    # this file.

# Hyperparameters matching the assignment notebook:
# n_a: hidden state size of one LSTM; n_s: hidden state size of the other.
# (Presumably the pre- and post-attention LSTMs — confirm against notebook.)
n_a = 64
n_s = 128
m = 10  # number of examples to load from the dataset

dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)

human_vocab_size = len(human_vocab)
machine_vocab_size = len(machine_vocab)

# Build the reference model and summarize it by four integers the grader
# compares against the learner's model: total parameter count, number of
# inputs, number of outputs, and number of layers.
im = model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size)
cp1 = im.count_params()
mi1 = len(im.inputs)
mo1 = len(im.outputs)
ml1 = len(im.layers)
m_out1 = np.asarray((cp1, mi1, mo1, ml1))


# GRADED FUNCTION: one_step_attention

# Reference output for the one_step_attention part of the assignment.
m_out2 = get_testCase()
74
75
def generateTestCases():
    """Assemble the grader's test-case dictionary.

    Returns a mapping from graded part name to its Coursera part id and a
    list of {'testInput', 'testOutput'} pairs computed at module load time.
    """
    one_step_cases = [{
        'testInput': 0,
        'testOutput': m_out2,
    }]
    model_cases = [{
        'testInput': (Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size),
        'testOutput': m_out1,
    }]
    return {
        'one_step_attention': {
            'partId': 'zcQIs',
            'testCases': one_step_cases,
        },
        'model': {
            'partId': 'PTKef',
            'testCases': model_cases,
        },
    }
97
98
99