This is a fairly good naming scheme for summaries:
(1) The three name scopes 'networks', 'layer_%d' % n_layer, and 'weights' stack on top of one another, so each summary ends up tagged with its full path.
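A minimal sketch of how the stacking plays out (assuming standard TF 1.x name-scope behavior; 'layer_1' here stands in for the formatted layer name):

import tensorflow as tf

with tf.name_scope('networks'):
    with tf.name_scope('layer_1'):
        w = tf.Variable(tf.random_normal([1, 10]), name='W')
        # The enclosing scopes compose, so the tag shown in
        # TensorBoard becomes 'networks/layer_1/weights'
        tf.summary.histogram('weights', w)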
(2) Summaries are written out incrementally during training:

if i % 50 == 0:
    result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
    writer.add_summary(result, i)

The snippet above re-evaluates the merged summary op every 50 steps and appends the result to the event file.
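Passing i as the second argument to add_summary records it as the global step, which becomes the x-axis of the scalar and histogram plots in TensorBoard.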
(3) The resulting TensorBoard graph comes out quite clean.
import numpy as np
import tensorflow as tf
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    # One fully connected layer; its ops and summaries live under 'layer_%d'
    layer_name = 'layer_%d' % n_layer
    with tf.name_scope(layer_name):
        Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        tf.summary.histogram('weights', Weights)
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='B')
        tf.summary.histogram('biases', biases)
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
    return outputs
# Toy data set: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
with tf.name_scope('networks'):
    l1 = add_layer(xs, 1, 10, 1, activation_function=tf.nn.relu)
    prediction = add_layer(l1, 10, 1, 2, activation_function=None)
with tf.name_scope('losses'):
    loss = tf.reduce_mean(tf.square(ys - prediction))
    tf.summary.scalar('Loss', loss)
# 'train' box in the graph: holds the gradient-descent step and the weight updates
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
# Merge every summary op defined above into a single op
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # 'path/' is the log directory; point TensorBoard at the same directory
    writer = tf.summary.FileWriter('path/', tf.get_default_graph())
    sess.run(init)
    for i in range(10001):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
            writer.add_summary(result, i)
    writer.close()
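To inspect the resulting graph and the Loss/weights curves, launch TensorBoard against the same log directory (replace 'path/' with whatever directory you actually passed to FileWriter):

tensorboard --logdir=path/

Then open the URL it prints (typically http://localhost:6006) in a browser.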