Path: blob/master/C5 - Sequence Models/Week 3/Machine Translation/generateTestCases.py
# New Generate Test Cases
import numpy as np
import math
import os, sys
from testCase import get_testCase
# import copy
# from keras.callbacks import History
# import tensorflow as tf
sys.path.append('../')
sys.path.append('../../')

from grader_support import stdout_redirector
from grader_support import util

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# This grader is for the Neural Machine Translation assignment

mFiles = [
    "one_step_attention.py",
    "model.py"
]


class suppress_stdout_stderr(object):
    '''
    A context manager for doing a "deep suppression" of stdout and stderr in
    Python, i.e. it will suppress all print output, even if the print
    originates in a compiled C/Fortran sub-function.
    It will not suppress raised exceptions: the traceback is printed to stderr
    only after the context manager has exited and restored the real file
    descriptors.
    '''
    def __init__(self):
        # Open a pair of null files.
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
        # Save the actual stdout (1) and stderr (2) file descriptors.
        self.save_fds = [os.dup(1), os.dup(2)]

    def __enter__(self):
        # Point stdout (1) and stderr (2) at the null files.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)

    def __exit__(self, *_):
        # Re-assign the real stdout/stderr back to (1) and (2).
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # Close all file descriptors.
        for fd in self.null_fds + self.save_fds:
            os.close(fd)


# Fix the random seed so the reference outputs are reproducible.
np.random.seed(3)

with suppress_stdout_stderr():
    from solutions import *
    from testCase import get_testCase

    n_a = 64
    n_s = 128
    m = 10
    dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)

    human_vocab_size = len(human_vocab)
    machine_vocab_size = len(machine_vocab)

    # Reference outputs for the 'model' part: parameter count and the number
    # of inputs, outputs and layers of the instructor's model.
    im = model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size)
    cp1 = im.count_params()
    mi1 = len(im.inputs)
    mo1 = len(im.outputs)
    ml1 = len(im.layers)
    m_out1 = np.asarray((cp1, mi1, mo1, ml1))


# GRADED FUNCTION: one_step_attention

# Reference output for the 'one_step_attention' part, taken from testCase.py.
m_out2 = get_testCase()


def generateTestCases():
    testCases = {
        'one_step_attention': {
            'partId': 'zcQIs',
            'testCases': [
                {
                    'testInput': 0,
                    'testOutput': m_out2
                }
            ]
        },
        'model': {
            'partId': 'PTKef',
            'testCases': [
                {
                    'testInput': (Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size),
                    'testOutput': m_out1
                }
            ]
        }
    }
    return testCases
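The docstring above explains the "deep suppression" trick in prose; the short sketch below is an addition (not part of the original grader) showing it in action. It assumes it is appended to this module, so suppress_stdout_stderr and the os import are already in scope; the echo command and the messages are placeholder examples.

# Illustrative sketch (assumption, not part of the original file):
# demonstrates the file-descriptor-level suppression implemented by
# suppress_stdout_stderr above.
if __name__ == '__main__':
    with suppress_stdout_stderr():
        # flush=True forces the write while fd 1 still points at /dev/null,
        # so nothing leaks out of Python's own buffer after restoration.
        print("hidden: ordinary Python print", flush=True)
        # Child processes inherit the redirected descriptors, so their
        # output is silenced as well.
        os.system("echo 'hidden: output from a child process'")
    print("visible: printed after the real stdout/stderr are restored")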