[TensorFlow syntax] tf.clip_by_global_norm
with tf.variable_scope('crf'):
    self.log_likelihood, self.transition_params = crf.crf_log_likelihood(
        inputs=self.logits, tag_indices=self.targets,
        sequence_lengths=self.sequence_length)
with tf.name_scope('loss'):
    # self.log_likelihood is the CRF log-likelihood, so the loss to
    # minimize is its negation, -self.log_likelihood
    self.log_loss = tf.reduce_mean(-self.log_likelihood)
    # L2 regularization over all trainable variables
    self.l2_loss = tf.contrib.layers.apply_regularization(
        regularizer=tf.contrib.layers.l2_regularizer(0.0001),
        weights_list=tf.trainable_variables())
    self.loss = self.log_loss + self.l2_loss
self.train_summary = tf.summary.scalar('loss', self.loss)
self.vali_summary = tf.summary.scalar('loss', self.loss)
# self.merged = tf.summary.merge_all()
optimizer = tf.train.AdamOptimizer(self.learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
# Gradient clipping
tvars = tf.trainable_variables()
# tf.gradients computes the gradient of the loss with respect to each
# trainable variable.
# clip_by_global_norm takes the whole list of those gradients, computes
# their joint global norm, and if it exceeds self.clip_grad rescales
# every gradient by the same factor (see the NumPy sketch after this
# block). It returns the clipped list and the pre-clipping global norm.
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), self.clip_grad)
# apply_gradients is the second half of tf.train.Optimizer.minimize:
# minimize is simply tf.train.Optimizer.compute_gradients followed by
# tf.train.Optimizer.apply_gradients. apply_gradients expects a list of
# (gradient, variable) pairs, built here with zip(grads, tvars).
# On tvars, see: https://blog.csdn.net/u014595019/article/details/52805444
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
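
To make the clipping rule concrete, here is a minimal NumPy sketch of the formula tf.clip_by_global_norm applies (the helper name and the toy gradients below are made up for illustration): the global norm is sqrt of the sum of the squared L2 norms of every gradient in the list, and each gradient is multiplied by the same factor clip_norm / max(global_norm, clip_norm).

import numpy as np

def clip_by_global_norm(grads, clip_norm):
    # global_norm = sqrt(sum_i ||g_i||_2^2), over the whole list at once
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    # a single shared scale factor keeps the relative sizes of the
    # gradients (and hence the direction of the update) unchanged
    scale = clip_norm / max(global_norm, clip_norm)
    return [g * scale for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([12.0])]      # global norm = 13
clipped, norm = clip_by_global_norm(grads, clip_norm=5.0)
print(norm)                                           # 13.0
print(np.sqrt(sum(np.sum(g ** 2) for g in clipped)))  # 5.0

When the global norm is already at or below clip_norm, the scale factor is 1 and the gradients pass through unchanged.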
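
And for the comment about minimize: a short sketch of the decomposition it describes, assuming TensorFlow 1.x (the API this post uses). The toy variable, loss, and learning rate are arbitrary; the point is that spelling out the two steps exposes the (gradient, variable) pairs, which is exactly where clipping slots in.

import tensorflow as tf  # assumes TensorFlow 1.x, matching the post

w = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(w ** 2)
optimizer = tf.train.GradientDescentOptimizer(0.1)

# One call: minimize() computes and applies the gradients in one step.
train_op_plain = optimizer.minimize(loss)

# Two calls: the same update, but the (gradient, variable) pairs are
# exposed in between, so they can be clipped before being applied.
grads_and_vars = optimizer.compute_gradients(loss)
grads, tvars = zip(*grads_and_vars)
clipped, _ = tf.clip_by_global_norm(list(grads), clip_norm=1.0)
train_op_clipped = optimizer.apply_gradients(zip(clipped, tvars))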