GitHub Repository: amanchadha/coursera-deep-learning-specialization
Path: blob/master/C5 - Sequence Models/Week 1/Dinosaur Island -- Character-level language model/utils.py
import numpy as np


def softmax(x):
    e_x = np.exp(x - np.max(x))  # subtract the max before exponentiating for numerical stability
    return e_x / e_x.sum(axis=0)

def smooth(loss, cur_loss):
    # exponentially weighted moving average of the training loss
    return loss * 0.999 + cur_loss * 0.001

def print_sample(sample_ix, ix_to_char):
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    txt = txt[0].upper() + txt[1:]  # capitalize first character
    print('%s' % (txt, ), end='')

def get_sample(sample_ix, ix_to_char):
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    txt = txt[0].upper() + txt[1:]  # capitalize first character
    return txt

def get_initial_loss(vocab_size, seq_length):
    return -np.log(1.0 / vocab_size) * seq_length

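# get_initial_loss() above gives the cross-entropy of a model that predicts the uniform
# distribution at every time step: -log(1/vocab_size) per step, summed over seq_length steps.
# Illustrative numbers (not taken from the course data): with vocab_size = 27 and
# seq_length = 7, the initial loss is -log(1/27) * 7 ≈ 23.07.
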
def initialize_parameters(n_a, n_x, n_y):
    """
    Initialize parameters with small random values

    Returns:
    parameters -- python dictionary containing:
                        Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
                        Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
                        Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                        b  -- Bias, numpy array of shape (n_a, 1)
                        by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
    """
    np.random.seed(1)
    Wax = np.random.randn(n_a, n_x) * 0.01  # input to hidden
    Waa = np.random.randn(n_a, n_a) * 0.01  # hidden to hidden
    Wya = np.random.randn(n_y, n_a) * 0.01  # hidden to output
    b = np.zeros((n_a, 1))   # hidden bias
    by = np.zeros((n_y, 1))  # output bias

    parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}

    return parameters

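# Example usage (sizes are illustrative assumptions, not the course's): for a 27-character
# vocabulary and a hidden state of 50 units,
#     parameters = initialize_parameters(n_a=50, n_x=27, n_y=27)
# returns Wax of shape (50, 27), Waa of shape (50, 50), Wya of shape (27, 50),
# b of shape (50, 1) and by of shape (27, 1).
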
def rnn_step_forward(parameters, a_prev, x):

    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
    a_next = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)  # hidden state
    p_t = softmax(np.dot(Wya, a_next) + by)  # probabilities for the next character

    return a_next, p_t

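# For reference, rnn_step_forward() implements one step of the basic RNN cell used here:
#     a_next = tanh(Wax @ x + Waa @ a_prev + b)
#     p_t    = softmax(Wya @ a_next + by)
# where x is the one-hot column vector for the current character.
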
def rnn_step_backward(dy, gradients, parameters, x, a, a_prev):

    gradients['dWya'] += np.dot(dy, a.T)
    gradients['dby'] += dy
    da = np.dot(parameters['Wya'].T, dy) + gradients['da_next']  # backprop into h
    daraw = (1 - a * a) * da  # backprop through tanh nonlinearity
    gradients['db'] += daraw
    gradients['dWax'] += np.dot(daraw, x.T)
    gradients['dWaa'] += np.dot(daraw, a_prev.T)
    gradients['da_next'] = np.dot(parameters['Waa'].T, daraw)
    return gradients

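# Note: rnn_step_backward() expects dy to be the gradient of the softmax/cross-entropy loss
# with respect to the pre-softmax output, i.e. y_hat - y_onehot (rnn_backward() below computes
# it as dy = np.copy(y_hat[t]); dy[Y[t]] -= 1). Gradients accumulate across time steps, so they
# must be zero-initialized before the backward pass.
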
def update_parameters(parameters, gradients, lr):

    parameters['Wax'] += -lr * gradients['dWax']
    parameters['Waa'] += -lr * gradients['dWaa']
    parameters['Wya'] += -lr * gradients['dWya']
    parameters['b'] += -lr * gradients['db']
    parameters['by'] += -lr * gradients['dby']
    return parameters

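# update_parameters() performs one step of plain gradient descent, theta <- theta - lr * dtheta,
# updating the parameter dictionary in place and also returning it.
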
def rnn_forward(X, Y, a0, parameters, vocab_size=27):

    # Initialize x, a and y_hat as empty dictionaries
    x, a, y_hat = {}, {}, {}

    a[-1] = np.copy(a0)

    # initialize the loss to 0
    loss = 0

    for t in range(len(X)):

        # Set x[t] to be the one-hot vector representation of the t'th character in X.
        # If X[t] is None, leave x[t] as the zero vector; this is used as the input for the first time step.
        x[t] = np.zeros((vocab_size, 1))
        if X[t] is not None:
            x[t][X[t]] = 1

        # Run one step forward of the RNN
        a[t], y_hat[t] = rnn_step_forward(parameters, a[t - 1], x[t])

        # Update the loss by adding the cross-entropy term for this time step.
        loss -= np.log(y_hat[t][Y[t], 0])

    cache = (y_hat, a, x)

    return loss, cache

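# The loss returned by rnn_forward() is the total cross-entropy of the sequence,
# -sum_t log(y_hat_t[Y[t]]), and the cache (y_hat, a, x) holds everything rnn_backward()
# needs to backpropagate through time.
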
def rnn_backward(X, Y, parameters, cache):
    # Initialize gradients as an empty dictionary
    gradients = {}

    # Retrieve from cache and parameters
    (y_hat, a, x) = cache
    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']

    # Initialize each gradient to zeros of the same shape as its corresponding parameter
    gradients['dWax'], gradients['dWaa'], gradients['dWya'] = np.zeros_like(Wax), np.zeros_like(Waa), np.zeros_like(Wya)
    gradients['db'], gradients['dby'] = np.zeros_like(b), np.zeros_like(by)
    gradients['da_next'] = np.zeros_like(a[0])

    # Backpropagate through time
    for t in reversed(range(len(X))):
        dy = np.copy(y_hat[t])
        dy[Y[t]] -= 1
        gradients = rnn_step_backward(dy, gradients, parameters, x[t], a[t], a[t - 1])

    return gradients, a
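

if __name__ == "__main__":
    # Minimal smoke test (not part of the original assignment): run one
    # forward/backward/update cycle on a tiny made-up sequence. The sizes and
    # character indices below are illustrative assumptions, not course data.
    vocab_size, n_a = 27, 50
    parameters = initialize_parameters(n_a, vocab_size, vocab_size)
    a_prev = np.zeros((n_a, 1))

    X = [None, 3, 9, 14, 15]   # None makes the first input the zero vector
    Y = X[1:] + [0]            # targets: inputs shifted left, then index 0 (assumed end-of-sequence token)

    loss, cache = rnn_forward(X, Y, a_prev, parameters, vocab_size)
    gradients, a = rnn_backward(X, Y, parameters, cache)
    parameters = update_parameters(parameters, gradients, lr=0.01)
    print('loss after one step: %f' % loss)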