@user728291, I was able to get the gradients into TensorBoard by using the optimize_loss function.
The signature of optimize_loss is:
optimize_loss(
loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True
)
I initialized global_step as follows:
from tensorflow.python.ops import variable_scope
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
global_step = variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(0, dtype=dtypes.int64))
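An alternative, if your TensorFlow version has it (this is an assumption on my part, not something the code above relies on), is to let TensorFlow create the step counter for you:

import tensorflow as tf

# Returns the existing global step variable, or creates one if none exists yet.
global_step = tf.train.get_or_create_global_step()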
Then, instead of the usual

training_operation = optimizer.minimize(loss_operation)

I used:
training_operation = tf.contrib.layers.optimize_loss(
loss_operation, global_step, learning_rate=rate, optimizer='Adam',
summaries=["gradients"])
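If you also want gradient clipping or a decaying learning rate on top of the gradient summaries, optimize_loss takes those in the same call. A sketch; the clip norm and decay constants below are values I made up, not recommendations:

# decay_fn receives the learning rate and global step and returns the decayed rate.
def decay_fn(learning_rate, global_step):
    return tf.train.exponential_decay(
        learning_rate, global_step, decay_steps=10000, decay_rate=0.96)

training_operation = tf.contrib.layers.optimize_loss(
    loss_operation, global_step, learning_rate=rate, optimizer='Adam',
    clip_gradients=5.0,               # assumed clip norm
    learning_rate_decay_fn=decay_fn,
    summaries=["gradients"])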
summary = tf.summary.merge_all()
Then run the following inside the TensorFlow session / training loop:
summary_writer = tf.summary.FileWriter(logdir_run_x, sess.graph)
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, i)
summary_writer.flush()
logdir_run_x is a different directory for each run, which makes it easy to compare runs side by side in TensorBoard. The gradient summaries end up under the OptimizeLoss scope, with an entry for each weight and for each beta variable.
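One simple way to get a distinct logdir_run_x per run is to timestamp the path; a sketch, where the "logs" base directory is just my own choice:

import os
import time

# Each run gets its own subdirectory; point TensorBoard at the parent "logs"
# directory to see all runs together.
logdir_run_x = os.path.join("logs", "run_{}".format(int(time.time())))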
UPDATE: With tf.slim there is another, arguably cleaner, way to do the same thing:
import tensorflow.contrib.slim as slim

optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = slim.learning.create_train_op(loss_operation, optimizer, summarize_gradients=True)
summarize_gradients=True is not the default, but when set it adds gradient summaries for all of the weights. In TensorBoard these show up under summarize_grads.
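If you are not running your own session loop, the same train_op can also be handed to slim's training loop. A minimal sketch, assuming the logdir_run_x from above and a step count I picked arbitrarily:

# Sketch: slim.learning.train runs the training loop and periodically writes
# the summaries (including summarize_grads) to logdir for TensorBoard.
slim.learning.train(
    training_operation,
    logdir=logdir_run_x,
    number_of_steps=1000,        # assumed value, adjust for your training
    save_summaries_secs=30)      # how often summaries get written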