📚 The CoCalc Library - books, templates and other resources
License: OTHER
import tensorflow as tf

from layer_utils import get_deconv2d_output_dims


def conv(input, name, filter_dims, stride_dims, padding='SAME',
         non_linear_fn=tf.nn.relu):
    """Build a 2-D convolution layer.

    Args:
        input: 4-D tensor, NHWC layout (batch, height, width, channels).
        name: variable-scope name for the layer's parameters.
        filter_dims: [filter_h, filter_w, num_channels_out].
        stride_dims: [stride_h, stride_w].
        padding: 'SAME' or 'VALID', forwarded to tf.nn.conv2d.
        non_linear_fn: activation applied to the output; pass None to skip.

    Returns:
        The (optionally activated) convolution output tensor.
    """
    input_dims = input.get_shape().as_list()
    assert(len(input_dims) == 4)   # batch_size, height, width, num_channels_in
    assert(len(filter_dims) == 3)  # height, width and num_channels out
    assert(len(stride_dims) == 2)  # stride height and width

    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    # Define a variable scope for the conv layer
    with tf.variable_scope(name) as scope:
        # Create filter weight variable: [h, w, in, out] as conv2d expects.
        conv_weights = tf.get_variable(
            'weights',
            [filter_h, filter_w, num_channels_in, num_channels_out],
            initializer=tf.truncated_normal_initializer())

        # Create bias variable, one per output channel.
        conv_biases = tf.get_variable(
            'biases', [num_channels_out],
            initializer=tf.constant_initializer(0.0))

        # Define the convolution flow graph (batch and channel stride are 1).
        conv_map = tf.nn.conv2d(input, conv_weights,
                                [1, stride_h, stride_w, 1], padding=padding)

        # Add bias to conv output
        conv_map = tf.nn.bias_add(conv_map, conv_biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            conv_map = non_linear_fn(conv_map, name=scope.name)
        return conv_map


def deconv(input, name, filter_dims, stride_dims, padding='SAME',
           non_linear_fn=tf.nn.relu):
    """Build a 2-D transposed-convolution (deconvolution) layer.

    Args:
        input: 4-D tensor, NHWC layout (batch, height, width, channels).
        name: variable-scope name for the layer's parameters.
        filter_dims: [filter_h, filter_w, num_channels_out].
        stride_dims: [stride_h, stride_w].
        padding: 'SAME' or 'VALID', forwarded to tf.nn.conv2d_transpose.
        non_linear_fn: activation applied to the output; pass None to skip.

    Returns:
        The (optionally activated) transposed-convolution output tensor.
    """
    input_dims = input.get_shape().as_list()
    assert(len(input_dims) == 4)   # batch_size, height, width, num_channels_in
    assert(len(filter_dims) == 3)  # height, width and num_channels out
    assert(len(stride_dims) == 2)  # stride height and width

    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims
    # Let's step into this function
    # NOTE(review): assumes layer_utils.get_deconv2d_output_dims returns the
    # NHWC output shape conv2d_transpose expects — confirm against layer_utils.
    output_dims = get_deconv2d_output_dims(input_dims,
                                           filter_dims,
                                           stride_dims,
                                           padding)

    # Define a variable scope for the deconv layer
    with tf.variable_scope(name) as scope:
        # Create filter weight variable.
        # Note that num_channels_out and in positions are flipped for deconv:
        # conv2d_transpose expects [h, w, out_channels, in_channels].
        deconv_weights = tf.get_variable(
            'weights',
            [filter_h, filter_w, num_channels_out, num_channels_in],
            initializer=tf.truncated_normal_initializer())

        # Create bias variable, one per output channel.
        deconv_biases = tf.get_variable(
            'biases', [num_channels_out],
            initializer=tf.constant_initializer(0.0))

        # Define the deconv flow graph
        deconv_map = tf.nn.conv2d_transpose(input, deconv_weights,
                                            output_dims,
                                            [1, stride_h, stride_w, 1],
                                            padding=padding)

        # Add bias to deconv output
        deconv_map = tf.nn.bias_add(deconv_map, deconv_biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            deconv_map = non_linear_fn(deconv_map, name=scope.name)
        return deconv_map


def max_pool(input, name, filter_dims, stride_dims, padding='SAME'):
    """Build a 2-D max-pooling op.

    Args:
        input: 4-D tensor, NHWC layout.
        name: op name.
        filter_dims: [filter_h, filter_w] pooling window.
        stride_dims: [stride_h, stride_w].
        padding: 'SAME' or 'VALID', forwarded to tf.nn.max_pool.

    Returns:
        The pooled tensor.
    """
    assert(len(filter_dims) == 2)  # filter height and width
    assert(len(stride_dims) == 2)  # stride height and width

    filter_h, filter_w = filter_dims
    stride_h, stride_w = stride_dims

    # Define the max pool flow graph and return output
    # (batch and channel dims are neither pooled nor strided).
    return tf.nn.max_pool(input,
                          ksize=[1, filter_h, filter_w, 1],
                          strides=[1, stride_h, stride_w, 1],
                          padding=padding,
                          name=name)


def fc(input, name, out_dim, non_linear_fn=tf.nn.relu):
    """Build a fully-connected layer, flattening 4-D conv output if needed.

    Args:
        input: 2-D tensor (batch, features) or 4-D NHWC tensor.
        name: variable-scope name for the layer's parameters.
        out_dim: int, number of output units.
        non_linear_fn: activation applied to the output; pass None to skip.

    Returns:
        The (optionally activated) 2-D output tensor of shape (batch, out_dim).
    """
    assert(type(out_dim) == int)

    # Define a variable scope for the FC layer
    with tf.variable_scope(name) as scope:
        input_dims = input.get_shape().as_list()
        # the input to the fc layer should be flattened
        if len(input_dims) == 4:
            # for eg. the output of a conv layer
            batch_size, input_h, input_w, num_channels = input_dims
            # ignore the batch dimension
            in_dim = input_h * input_w * num_channels
            # BUGFIX: batch_size from get_shape() is often None (dynamic);
            # -1 lets reshape infer it instead of failing on a None dim.
            flat_input = tf.reshape(input, [-1, in_dim])
        else:
            in_dim = input_dims[-1]
            flat_input = input

        # Create weight variable
        fc_weights = tf.get_variable(
            'weights', [in_dim, out_dim],
            initializer=tf.truncated_normal_initializer())

        # Create bias variable
        fc_biases = tf.get_variable(
            'biases', [out_dim],
            initializer=tf.constant_initializer(0.0))

        # Define FC flow graph
        output = tf.nn.bias_add(tf.matmul(flat_input, fc_weights), fc_biases)

        # Apply non-linearity (if asked) and return output
        if non_linear_fn is not None:
            output = non_linear_fn(output, name=scope.name)
        return output