Path: blob/master/C5 - Sequence Models/Week 1/Dinosaur Island -- Character-level language model/utils.py
import numpy as np


def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)


def smooth(loss, cur_loss):
    # Exponentially weighted moving average of the loss, used for smoother reporting
    return loss * 0.999 + cur_loss * 0.001


def print_sample(sample_ix, ix_to_char):
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    txt = txt[0].upper() + txt[1:]  # capitalize first character
    print('%s' % (txt, ), end='')


def get_sample(sample_ix, ix_to_char):
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    txt = txt[0].upper() + txt[1:]  # capitalize first character
    return txt


def get_initial_loss(vocab_size, seq_length):
    # Cross-entropy loss of a uniform prediction over the vocabulary, summed over seq_length steps
    return -np.log(1.0 / vocab_size) * seq_length


def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)


def initialize_parameters(n_a, n_x, n_y):
    """
    Initialize parameters with small random values

    Returns:
    parameters -- python dictionary containing:
                  Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
                  Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
                  Wya -- Weight matrix relating the hidden state to the output, numpy array of shape (n_y, n_a)
                  b -- Bias, numpy array of shape (n_a, 1)
                  by -- Bias relating the hidden state to the output, numpy array of shape (n_y, 1)
    """
    np.random.seed(1)
    Wax = np.random.randn(n_a, n_x) * 0.01  # input to hidden
    Waa = np.random.randn(n_a, n_a) * 0.01  # hidden to hidden
    Wya = np.random.randn(n_y, n_a) * 0.01  # hidden to output
    b = np.zeros((n_a, 1))   # hidden bias
    by = np.zeros((n_y, 1))  # output bias

    parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}

    return parameters


def rnn_step_forward(parameters, a_prev, x):

    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
    a_next = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)  # hidden state
    p_t = softmax(np.dot(Wya, a_next) + by)  # probabilities for next chars

    return a_next, p_t


def rnn_step_backward(dy, gradients, parameters, x, a, a_prev):

    gradients['dWya'] += np.dot(dy, a.T)
    gradients['dby'] += dy
    da = np.dot(parameters['Wya'].T, dy) + gradients['da_next']  # backprop into h
    daraw = (1 - a * a) * da  # backprop through tanh nonlinearity
    gradients['db'] += daraw
    gradients['dWax'] += np.dot(daraw, x.T)
    gradients['dWaa'] += np.dot(daraw, a_prev.T)
    gradients['da_next'] = np.dot(parameters['Waa'].T, daraw)
    return gradients


def update_parameters(parameters, gradients, lr):

    parameters['Wax'] += -lr * gradients['dWax']
    parameters['Waa'] += -lr * gradients['dWaa']
    parameters['Wya'] += -lr * gradients['dWya']
    parameters['b'] += -lr * gradients['db']
    parameters['by'] += -lr * gradients['dby']
    return parameters


def rnn_forward(X, Y, a0, parameters, vocab_size=27):

    # Initialize x, a and y_hat as empty dictionaries
    x, a, y_hat = {}, {}, {}

    a[-1] = np.copy(a0)

    # Initialize the loss to 0
    loss = 0

    for t in range(len(X)):

        # Set x[t] to be the one-hot vector representation of the t'th character in X.
        # If X[t] == None, we just have x[t] = 0. This is used to set the input for
        # the first timestep to the zero vector.
        x[t] = np.zeros((vocab_size, 1))
        if (X[t] != None):
            x[t][X[t]] = 1

        # Run one step forward of the RNN
        a[t], y_hat[t] = rnn_step_forward(parameters, a[t - 1], x[t])

        # Update the loss by subtracting the cross-entropy term of this time-step from it.
        loss -= np.log(y_hat[t][Y[t], 0])

    cache = (y_hat, a, x)

    return loss, cache


def rnn_backward(X, Y, parameters, cache):
    # Initialize gradients as an empty dictionary
    gradients = {}

    # Retrieve from cache and parameters
    (y_hat, a, x) = cache
    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']

    # Each gradient is initialized to zeros of the same dimension as its corresponding parameter
    gradients['dWax'], gradients['dWaa'], gradients['dWya'] = np.zeros_like(Wax), np.zeros_like(Waa), np.zeros_like(Wya)
    gradients['db'], gradients['dby'] = np.zeros_like(b), np.zeros_like(by)
    gradients['da_next'] = np.zeros_like(a[0])

    ### START CODE HERE ###
    # Backpropagate through time
    for t in reversed(range(len(X))):
        dy = np.copy(y_hat[t])
        dy[Y[t]] -= 1
        gradients = rnn_step_backward(dy, gradients, parameters, x[t], a[t], a[t - 1])
    ### END CODE HERE ###

    return gradients, a
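# ---------------------------------------------------------------------------
# Usage sketch (not part of the original utils.py): a minimal single training
# step wiring the helpers above together, assuming X and Y are lists of
# character indices as in the Dinosaur Island assignment. The function name
# `optimize_step` and the clipping threshold are illustrative assumptions,
# not the notebook's exact code.
# ---------------------------------------------------------------------------
def optimize_step(X, Y, a_prev, parameters, learning_rate=0.01, clip_value=5.0):
    # Forward pass through the whole sequence, accumulating cross-entropy loss
    loss, cache = rnn_forward(X, Y, a_prev, parameters)

    # Backpropagation through time to compute gradients for every parameter
    gradients, a = rnn_backward(X, Y, parameters, cache)

    # Clip gradients element-wise to mitigate exploding gradients (assumed threshold)
    for grad in ['dWax', 'dWaa', 'dWya', 'db', 'dby']:
        np.clip(gradients[grad], -clip_value, clip_value, out=gradients[grad])

    # Gradient-descent update of all parameters
    parameters = update_parameters(parameters, gradients, learning_rate)

    # Return the loss and the last hidden state so the next step can continue from it
    return loss, parameters, a[len(X) - 1]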