# TensorFlow Mechanics 101

The goal of this tutorial is to show how to use TensorFlow to train and evaluate a simple feed-forward neural network for handwritten digit classification using the (classic) MNIST data set.
## Tutorial Files

This tutorial references the following files:

File | Purpose
--- | ---
`mnist.py` | The code to build a fully-connected MNIST model.
`fully_connected_feed.py` | The main code to train the built MNIST model against the downloaded dataset, using a feed dictionary.

Simply run the `fully_connected_feed.py` file directly to begin training:

```
python fully_connected_feed.py
```
## Prepare the Data

### Download

At the top of the `run_training()` method, the `input_data.read_data_sets()` function ensures that the correct data has been downloaded to your local training folder and then unpacks that data to return a dictionary of `DataSet` instances.

NOTE: The `fake_data` flag is used for unit-testing purposes and may safely be ignored by the reader.

Dataset | Purpose
--- | ---
`data_sets.train` | 55,000 images and labels, for primary training.
`data_sets.validation` | 5,000 images and labels, for iterative validation of training accuracy.
`data_sets.test` | 10,000 images and labels, for final testing of trained accuracy.
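As a minimal sketch of that call (assuming the tutorial's `FLAGS` object defines `train_dir` and `fake_data`):

```python
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
```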
### Inputs and Placeholders

The `placeholder_inputs()` function creates two `tf.placeholder` ops that define the shape of the inputs, including the `batch_size`, to the rest of the graph and into which the actual training examples will be fed.

Further down, in the training loop, the full image and label datasets are sliced to fit the `batch_size` for each step, matched with these placeholder ops, and then passed into the `sess.run()` function using the `feed_dict` parameter.
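A sketch of the two placeholder ops, assuming `mnist.IMAGE_PIXELS` holds the flattened image size (28 × 28 = 784):

```python
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                       mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
```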
## Build the Graph

After creating placeholders for the data, the graph is built from the `mnist.py` file according to a 3-stage pattern: `inference()`, `loss()`, and `training()`.

1. `inference()` - Builds the graph as far as required for running the network forward to make predictions.
2. `loss()` - Adds to the inference graph the ops required to generate loss.
3. `training()` - Adds to the loss graph the ops required to compute and apply gradients.

Chained together, the three stages look like the sketch below.
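A minimal sketch of how `fully_connected_feed.py` chains the three stages (the `FLAGS` hyperparameter names here are assumptions matching the tutorial's flag definitions):

```python
# Build a graph that computes predictions from the inference model.
logits = mnist.inference(images_placeholder,
                         FLAGS.hidden1,
                         FLAGS.hidden2)
# Add the ops for loss calculation to the graph.
loss = mnist.loss(logits, labels_placeholder)
# Add the ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
```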
### Inference

The `inference()` function builds the graph as far as needed to return the tensor that will contain the output predictions. It takes the images placeholder as input and builds on top of it a pair of fully connected layers with ReLU activation, followed by a ten-node linear layer specifying the output logits.

Each layer is created beneath a unique `tf.name_scope` that acts as a prefix to the items created within that scope.
```python
with tf.name_scope('hidden1') as scope:
```
Within the defined scope, the weights and biases to be used by the layer are generated into `tf.Variable` instances, with their desired shapes:
```python
weights = tf.Variable(
    tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                        stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
    name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
                     name='biases')
```
When, for instance, these are created under the `hidden1` scope, the unique name given to the weights variable is `hidden1/weights`.

Each variable is given an initializer op as part of its construction. In this most common case, the weights are initialized with `tf.truncated_normal` and given their shape of a 2-D tensor, where the first dim represents the number of units in the layer from which the weights connect and the second dim represents the number of units in the layer to which the weights connect. For the first layer, named `hidden1`, the dimensions are `[IMAGE_PIXELS, hidden1_units]` because the weights connect the image inputs to the hidden1 layer. The `tf.truncated_normal` initializer generates a random distribution with a given mean and standard deviation.

The biases are then initialized with `tf.zeros` to ensure they start with all-zero values; their shape is simply the number of units in the layer to which they connect.

The graph's three primary ops -- two `tf.nn.relu` ops wrapping `tf.matmul` for the hidden layers, and one extra `tf.matmul` for the logits -- are then created, one after the other, each with a separate pair of `tf.Variable` instances connected either to the input placeholder or to the output tensor of the layer beneath it (see the sketch below). Finally, the `logits` tensor that will contain the output is returned, ready to be consumed by the `loss()` and `training()` stages.
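Sketched together, the three ops look like this (in the real `mnist.py`, each `weights`/`biases` pair is the one created inside that layer's own name scope; they are shown flat here for brevity):

```python
# Inside the 'hidden1' scope (weights shape: [IMAGE_PIXELS, hidden1_units]):
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)

# Inside the 'hidden2' scope (weights shape: [hidden1_units, hidden2_units]):
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)

# Inside the 'softmax_linear' scope (weights shape: [hidden2_units, NUM_CLASSES]):
logits = tf.matmul(hidden2, weights) + biases
```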
### Loss

The `loss()` function further builds the graph by adding the required loss ops.

First, the values from the `labels_placeholder` are encoded as a tensor of 1-hot values. For example, if the class identifier is 3, the value is converted to:
`[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]`
```python
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
    concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
```
A `tf.nn.softmax_cross_entropy_with_logits` op is then added to compare the output logits from the `inference()` function against the 1-hot labels.
```python
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                        onehot_labels,
                                                        name='xentropy')
```
It then uses `tf.reduce_mean` to average the cross-entropy values across the batch dimension (the first dimension) as the total loss; the resulting tensor is what `loss()` returns.
```python
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
```
### Training

The `training()` function adds the operations needed to minimize the loss via gradient descent.

Firstly, it takes the loss tensor from the `loss()` function and hands it to `tf.scalar_summary`, an op for generating summary values into the events file when used with a `SummaryWriter` (see below). In this case, it will emit the snapshot value of the loss every time the summaries are written out.
```python
tf.scalar_summary(loss.op.name, loss)
```
Next, we instantiate a `tf.train.GradientDescentOptimizer` responsible for applying gradients with the requested learning rate.
```python
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
```
We then generate a single variable to contain a counter for the global training step, and the `minimize()` op is used both to update the trainable weights in the system and to increment the global step. By convention this op is known as the `train_op`, and it is what must be run by a TensorFlow session in order to induce one full step of training (see the sketch below).
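A minimal sketch of those two lines (the `global_step` variable name matches the convention used in `mnist.py`):

```python
# A non-trainable counter, incremented once per training step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Applies the gradients and bumps the counter in a single op.
train_op = optimizer.minimize(loss, global_step=global_step)
```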
## Train the Model

Once the graph is built, it can be iteratively trained and evaluated in a loop controlled by the user code in `fully_connected_feed.py`.

### The Graph

At the top of the `run_training()` function is a Python `with` command indicating that all of the built ops are to be associated with the default global `tf.Graph` instance.
```python
with tf.Graph().as_default():
```
A `tf.Graph` is a collection of ops that may be executed together as a group. Most TensorFlow uses will only need to rely on the single default graph; more complicated uses with multiple graphs are possible, but beyond the scope of this simple tutorial.
### The Session

Once all of the build preparation has been completed and all of the necessary ops generated, a `tf.Session` is created for running the graph.
```python
sess = tf.Session()
```
The empty parameter list indicates that this code will attach to (or create, if not yet created) the default local session. Alternately, a `Session` may be generated into a `with` block for scoping, as sketched below.
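A brief sketch of the `with`-block form:

```python
with tf.Session() as sess:
    pass  # run the graph here; the session is closed automatically on exit
```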
Immediately after creating the session, all of the `tf.Variable` instances are initialized by calling `sess.run()` on their initialization op.
```python
init = tf.initialize_all_variables()
sess.run(init)
```
The `sess.run()` method runs the complete subset of the graph that corresponds to the op(s) passed as parameters. In this first call, the `init` op is a `tf.group` that contains only the initializers for the variables; none of the rest of the graph is run here. That happens in the training loop below.
### Train Loop

After initializing the variables with the session, training may begin. The user code controls the training per step, and the simplest useful loop would just run the `train_op` repeatedly. This tutorial, however, is slightly more complicated in that it must also slice up the input data for each step to match the previously generated placeholders.

#### Feed the Graph

For each step, the code generates a feed dictionary containing the set of examples on which to train for that step, keyed by the placeholder ops they represent. In the `fill_feed_dict()` function, the given `DataSet` is queried for its next `batch_size` set of images and labels (see the sketch below), and tensors matching the placeholders are filled with them.
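A sketch of that query, assuming the `next_batch()` method on the `DataSet` class from the tutorial's `input_data` module:

```python
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
```

A Python dictionary object is then generated with the placeholders as keys and the representative feed tensors as values: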
```python
feed_dict = {
    images_placeholder: images_feed,
    labels_placeholder: labels_feed,
}
```
This is passed into the `sess.run()` function's `feed_dict` parameter to provide the input examples for this step of training.

#### Check the Status

The code specifies two values to fetch in its run call: `[train_op, loss]`.
```python
for step in xrange(FLAGS.max_steps):
    feed_dict = fill_feed_dict(data_sets.train,
                               images_placeholder,
                               labels_placeholder)
    _, loss_value = sess.run([train_op, loss],
                             feed_dict=feed_dict)
```
Because there are two values to fetch, `sess.run()` returns a tuple with two items. Each `Tensor` in the list of values to fetch corresponds to a numpy array in the returned tuple, filled with the value of that tensor during this step of training. Since `train_op` is an `Operation` with no output value, the corresponding element in the returned tuple is `None` and is thus discarded. The value of the `loss` tensor, however, may become NaN if the model diverges during training, so we capture it for logging.

Assuming training runs fine without NaNs, the loop also prints a simple status line every 100 steps to let the user know the state of training (`duration` here is the per-step wall-clock time computed in the full source file).
```python
if step % 100 == 0:
    print 'Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)
```
#### Visualize the Status

In order to emit the events files used by TensorBoard, all of the summaries (in this case, only one) are collected into a single op during the graph-building phase.

```python
summary_op = tf.merge_all_summaries()
```
Then, after the session is created, a `tf.train.SummaryWriter` may be instantiated to write the events files, which contain both the graph itself and the values of the summaries.
```python
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                        graph_def=sess.graph_def)
```
Lastly, the events file is updated with new summary values every time the `summary_op` is run and the output is passed to the writer's `add_summary()` function.
```python
summary_str = sess.run(summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
```
When the events files are written, TensorBoard may be run against the training folder to display the values from the summaries.

NOTE: For more info about how to build and run TensorBoard, please see the accompanying tutorial, TensorBoard: Visualizing Learning.
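For example (assuming a TensorFlow install that puts the `tensorboard` launcher on your PATH; the exact invocation varies by version):

```
tensorboard --logdir=<train_dir>
```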
#### Save a Checkpoint

In order to emit a checkpoint file that may later be used to restore a model for further training or evaluation, we instantiate a `tf.train.Saver`.
```python
saver = tf.train.Saver()
```
In the training loop, the `saver.save()` method is periodically called to write a checkpoint file to the training directory with the current values of all the trainable variables, as sketched below.
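A sketch of that call (passing the current `step` so successive checkpoints get distinct names):

```python
saver.save(sess, FLAGS.train_dir, global_step=step)
```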
At some later point in the future, training might be resumed by using the `saver.restore()` method to reload the model parameters.
```python
saver.restore(sess, FLAGS.train_dir)
```
## Evaluate the Model

Every thousand steps, the code attempts to evaluate the model against both the training and test datasets. Note that more complicated usage would usually sequester `data_sets.test` to be checked only after significant amounts of hyperparameter tuning.

### Build the Eval Graph

Before entering the training loop, the eval op should have been built by calling the `evaluation()` function from `mnist.py` with the same logits/labels parameters as the `loss()` function.
```python
eval_correct = mnist.evaluation(logits, labels_placeholder)
```
The `evaluation()` function simply generates a `tf.nn.in_top_k` op that scores each model output as correct if the true label can be found among the K most-likely predictions. Here K is set to 1, so a prediction is only considered correct if it is for the true label (see the sketch below).
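A sketch of the body of `evaluation()` under that assumption:

```python
# For each example, a boolean: is the true label in the top-1 prediction?
eval_correct = tf.nn.in_top_k(logits, labels, 1)
```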
### Eval Output

One can then create a loop for filling a `feed_dict` and calling `sess.run()` against the `eval_correct` op to evaluate the model on the given dataset.
```python
true_count = 0  # accumulates the number of correct predictions
for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
```
The `true_count` variable simply accumulates all of the predictions that the `in_top_k` op has determined to be correct. From there, the precision may be calculated by dividing by the total number of examples processed, as sketched below.
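A sketch of that final computation (`num_examples` is assumed to be `steps_per_epoch * FLAGS.batch_size`, matching how the tutorial slices the data):

```python
# float() guards against Python 2 integer division.
precision = float(true_count) / num_examples
print '  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' % (
    num_examples, true_count, precision)
```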