# (Jupyter notebook cell marker: In [16])
# Back-propagation: build the training op that minimizes `loss`.
# Uses the TensorFlow 1.x Adam optimizer (tf.train.AdamOptimizer) with the
# `learning_rate` value defined earlier in the notebook.
# NOTE(review): the original comment claimed lr=0.03, but the code passes the
# variable `learning_rate` — confirm its actual value where it is defined.
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)