📚 The CoCalc Library - books, templates and other resources
License: OTHER
import tensorflow as tf

from utils import *
from autoencoder import *

batch_size = 100
batch_shape = (batch_size, 28, 28, 1)
num_visualize = 10

lr = 0.01
num_epochs = 50

def calculate_loss(original, reconstructed):
    # Sum of squared pixel differences, averaged over the batch.
    return tf.divide(tf.reduce_sum(tf.square(tf.subtract(reconstructed,
                                                          original))),
                     tf.constant(float(batch_size)))

def train(dataset):
    # Build the graph: autoencoder() returns the input placeholder
    # and the reconstructed output tensor.
    input_image, reconstructed_image = autoencoder(batch_shape)
    loss = calculate_loss(input_image, reconstructed_image)
    optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)

    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)

        dataset_size = len(dataset.train.images)
        print("Dataset size:", dataset_size)
        num_iters = (num_epochs * dataset_size) // batch_size
        print("Num iters:", num_iters)
        for step in range(num_iters):
            input_batch = get_next_batch(dataset.train, batch_size)
            loss_val, _ = session.run([loss, optimizer],
                                      feed_dict={input_image: input_batch})
            if step % 1000 == 0:
                print("Loss at step", step, ":", loss_val)

        # After training, reconstruct a held-out batch and visualize
        # originals next to their reconstructions.
        test_batch = get_next_batch(dataset.test, batch_size)
        reconstruction = session.run(reconstructed_image,
                                     feed_dict={input_image: test_batch})
        visualize(test_batch, reconstruction, num_visualize)

if __name__ == '__main__':
    dataset = load_dataset()
    train(dataset)
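The script depends on two companion modules that are not included here: autoencoder (providing autoencoder(batch_shape)) and utils (providing load_dataset, get_next_batch, and visualize). Given the dataset.train.images / dataset.test attributes, load_dataset presumably wraps the standard MNIST reader, but that is an assumption. For orientation only, below is a minimal sketch of a function with the same interface as autoencoder(batch_shape); the fully connected architecture, layer sizes, and activations are illustrative choices, not the actual implementation shipped with this example.

import tensorflow as tf

def autoencoder(batch_shape, hidden_units=256, code_units=32):
    # Hypothetical sketch matching the interface used by train():
    # returns (input placeholder, reconstruction tensor).
    # hidden_units and code_units are illustrative, not from the source.
    batch_size, height, width, channels = batch_shape
    num_pixels = height * width * channels

    input_image = tf.placeholder(tf.float32, shape=batch_shape,
                                 name="input_image")
    flat = tf.reshape(input_image, [batch_size, num_pixels])

    # Encoder: compress each flattened image into a small code vector.
    hidden = tf.layers.dense(flat, hidden_units, activation=tf.nn.relu)
    code = tf.layers.dense(hidden, code_units, activation=tf.nn.relu)

    # Decoder: expand the code back to pixel space in [0, 1].
    hidden_dec = tf.layers.dense(code, hidden_units, activation=tf.nn.relu)
    flat_reconstructed = tf.layers.dense(hidden_dec, num_pixels,
                                         activation=tf.nn.sigmoid)
    reconstructed_image = tf.reshape(flat_reconstructed, batch_shape,
                                     name="reconstructed_image")
    return input_image, reconstructed_image

Any such sketch would drop straight into train() above, since the training loop only relies on the placeholder/reconstruction pair and leaves the internal architecture to autoencoder.py.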