Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Download

📚 The CoCalc Library - books, templates and other resources

132930 views
License: OTHER
1
import tensorflow as tf
2
3
from layer_utils import get_deconv2d_output_dims
4
5
def conv(input, name, filter_dims, stride_dims, padding='SAME',
         non_linear_fn=tf.nn.relu):
    """2-D convolution layer: weights + bias + optional non-linearity.

    Args:
        input: 4-D tensor, assumed NHWC (batch, height, width, channels_in).
        name: variable-scope name for this layer's parameters.
        filter_dims: [filter_height, filter_width, num_channels_out].
        stride_dims: [stride_height, stride_width].
        padding: 'SAME' or 'VALID', passed straight to tf.nn.conv2d.
        non_linear_fn: activation applied to the biased output; pass None
            to return the pre-activation output.

    Returns:
        The layer output tensor (post-activation if non_linear_fn is given).
    """
    input_dims = input.get_shape().as_list()
    assert(len(input_dims) == 4)  # batch_size, height, width, num_channels_in
    assert(len(filter_dims) == 3)  # height, width and num_channels out
    assert(len(stride_dims) == 2)  # stride height and width

    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    # Define a variable scope for the conv layer
    with tf.variable_scope(name) as scope:
        # Create filter weight variable: [h, w, in_channels, out_channels]
        weights = tf.get_variable(
            'weights',
            shape=[filter_h, filter_w, num_channels_in, num_channels_out],
            initializer=tf.truncated_normal_initializer(stddev=0.1))

        # Create bias variable, one per output channel
        biases = tf.get_variable(
            'biases',
            shape=[num_channels_out],
            initializer=tf.constant_initializer(0.0))

        # Define the convolution flow graph (NHWC strides: [1, h, w, 1])
        conv_out = tf.nn.conv2d(input, weights,
                                strides=[1, stride_h, stride_w, 1],
                                padding=padding)

        # Add bias to conv output
        output = tf.nn.bias_add(conv_out, biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            output = non_linear_fn(output, name=scope.name)
        return output
28
29
def deconv(input, name, filter_dims, stride_dims, padding='SAME',
           non_linear_fn=tf.nn.relu):
    """2-D transposed convolution (deconv) layer: weights + bias + activation.

    Args:
        input: 4-D tensor, assumed NHWC (batch, height, width, channels_in).
        name: variable-scope name for this layer's parameters.
        filter_dims: [filter_height, filter_width, num_channels_out].
        stride_dims: [stride_height, stride_width].
        padding: 'SAME' or 'VALID', used both to compute the output shape
            (via get_deconv2d_output_dims) and by conv2d_transpose.
        non_linear_fn: activation applied to the biased output; pass None
            to return the pre-activation output.

    Returns:
        The layer output tensor with shape given by get_deconv2d_output_dims.
    """
    input_dims = input.get_shape().as_list()
    assert(len(input_dims) == 4)  # batch_size, height, width, num_channels_in
    assert(len(filter_dims) == 3)  # height, width and num_channels out
    assert(len(stride_dims) == 2)  # stride height and width

    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims
    # Let's step into this function
    output_dims = get_deconv2d_output_dims(input_dims,
                                           filter_dims,
                                           stride_dims,
                                           padding)

    # Define a variable scope for the deconv layer
    with tf.variable_scope(name) as scope:
        # Create filter weight variable.
        # Note that num_channels_out and in positions are flipped for deconv:
        # conv2d_transpose expects [h, w, out_channels, in_channels].
        weights = tf.get_variable(
            'weights',
            shape=[filter_h, filter_w, num_channels_out, num_channels_in],
            initializer=tf.truncated_normal_initializer(stddev=0.1))

        # Create bias variable, one per output channel
        biases = tf.get_variable(
            'biases',
            shape=[num_channels_out],
            initializer=tf.constant_initializer(0.0))

        # Define the deconv flow graph (NHWC strides: [1, h, w, 1])
        deconv_out = tf.nn.conv2d_transpose(input, weights,
                                            output_shape=output_dims,
                                            strides=[1, stride_h, stride_w, 1],
                                            padding=padding)

        # Add bias to deconv output
        output = tf.nn.bias_add(deconv_out, biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            output = non_linear_fn(output, name=scope.name)
        return output
58
59
def max_pool(input, name, filter_dims, stride_dims, padding='SAME'):
    """2-D max-pooling layer.

    Args:
        input: 4-D tensor, assumed NHWC (batch, height, width, channels).
        name: op name for the pooling node.
        filter_dims: [filter_height, filter_width] of the pooling window.
        stride_dims: [stride_height, stride_width].
        padding: 'SAME' or 'VALID', passed straight to tf.nn.max_pool.

    Returns:
        The max-pooled output tensor.
    """
    assert(len(filter_dims) == 2)  # filter height and width
    assert(len(stride_dims) == 2)  # stride height and width

    filter_h, filter_w = filter_dims
    stride_h, stride_w = stride_dims

    # Define the max pool flow graph and return output
    # (NHWC ksize/strides: [1, h, w, 1] — no pooling over batch/channels)
    return tf.nn.max_pool(input,
                          ksize=[1, filter_h, filter_w, 1],
                          strides=[1, stride_h, stride_w, 1],
                          padding=padding,
                          name=name)
68
69
def fc(input, name, out_dim, non_linear_fn=tf.nn.relu):
    """Fully-connected layer: flatten (if needed) + weights + bias + activation.

    Args:
        input: 2-D tensor (batch, features) or 4-D NHWC tensor (e.g. the
            output of a conv layer), which is flattened per example.
        name: variable-scope name for this layer's parameters.
        out_dim: int, number of output units.
        non_linear_fn: activation applied to the affine output; pass None
            to return the pre-activation output.

    Returns:
        A (batch, out_dim) output tensor.
    """
    assert(type(out_dim) == int)

    # Define a variable scope for the FC layer
    with tf.variable_scope(name) as scope:
        input_dims = input.get_shape().as_list()
        # the input to the fc layer should be flattened
        if len(input_dims) == 4:
            # for eg. the output of a conv layer
            batch_size, input_h, input_w, num_channels = input_dims
            # ignore the batch dimension
            in_dim = input_h * input_w * num_channels
            # Use -1 for the batch axis: batch_size is often None
            # (unknown at graph-build time), which tf.reshape rejects.
            flat_input = tf.reshape(input, [-1, in_dim])
        else:
            in_dim = input_dims[-1]
            flat_input = input

        # Create weight variable: [in_dim, out_dim]
        weights = tf.get_variable(
            'weights',
            shape=[in_dim, out_dim],
            initializer=tf.truncated_normal_initializer(stddev=0.1))

        # Create bias variable, one per output unit
        biases = tf.get_variable(
            'biases',
            shape=[out_dim],
            initializer=tf.constant_initializer(0.0))

        # Define FC flow graph: affine transform x @ W + b
        output = tf.nn.bias_add(tf.matmul(flat_input, weights), biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            output = non_linear_fn(output, name=scope.name)
        return output
94
95