[Notes]|[tf]|[tensorboard]|[Example 2: visualization] TensorFlow Study Notes (3)

Reference: Morvan Python (莫烦Python): morvanzhou.github.io

with tf.name_scope():

  • Gives a variable a display name (the name you see on the graph in TensorBoard).
with tf.name_scope('Weights'):
             Weights = tf.Variable(tf.random_normal([in_size, out_size]),name = 'W')
  • The node above is displayed as W (full name Weights/W).
with tf.name_scope('Weights_1'):
             Weights = tf.Variable(tf.random_normal([in_size, out_size]))
  • The node above has no explicit name, so in the graph you see the scope name Weights_1, while the variable itself falls back to the default node name Variable (full name Weights_1/Variable).
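A minimal sketch of how nested scopes compose (the shapes and names here are illustrative, not from the original notes); every node created inside a scope gets the scope as a prefix, and unnamed variables fall back to Variable:

import tensorflow as tf

with tf.name_scope('layer'):
    with tf.name_scope('Weights'):
        W = tf.Variable(tf.random_normal([2, 3]), name='W')
        b = tf.Variable(tf.zeros([3]))  # no explicit name
print(W.name)  # layer/Weights/W:0
print(b.name)  # layer/Weights/Variable:0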

tf.summary.FileWriter()

sess = tf.Session()
writer = tf.summary.FileWriter("logs/",sess.graph)
  • "logs/" is the directory the event file is written to, here the logs directory under the project root. The second argument is the graph to record, i.e. the default TensorFlow graph held by the session.
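The graph can also be attached after the writer is created, via add_graph; a small sketch (the explicit writer.close() call, which flushes the event file, is my addition and not from the original notes):

import tensorflow as tf

sess = tf.Session()
writer = tf.summary.FileWriter("logs/")  # create the event file
writer.add_graph(sess.graph)             # record the graph afterwards
writer.close()                           # flush and close the event file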

Full code

"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activation_function=None):
    # activation_function=None means a plain linear (identity) layer
    # add one more layer and return the output of this layer
    with tf.name_scope('layer'):
        with tf.name_scope('Weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            # randomly initialized in_size x out_size weight matrix
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            # biases start at 0.1 (a row vector of length out_size)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            # keep the linear output Wx_plus_b unchanged
            outputs = Wx_plus_b
        else:
            # apply the activation (here tf.nn.relu) to Wx_plus_b
            outputs = activation_function(Wx_plus_b)
        return outputs

# Make up some real data
# build the inputs x_data and the targets y_data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
# add noise so the data looks more like real, scattered measurements
y_data = np.square(x_data) - 0.5 + noise

# define placeholder for inputs to network
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    # any number of rows, one column: one feature per sample (a column vector)
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# 1 input neuron, 10 hidden neurons (the output layer below has 1 neuron)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)

# the error between prediction and real data
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
# sum the squared errors over each example's columns, then average over all
# examples: the matrix is reduced to a vector, then the vector to a scalar
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# gradient descent with learning rate 0.1, minimizing loss
sess = tf.Session()
writer = tf.summary.FileWriter("logs/",sess.graph)
# important step
# tf.initialize_all_variables() is no longer valid
# since 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

for i in range(1000):
    # train for 1000 iterations
    # training
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to see the step improvement
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

TensorBoard usage steps (Graph visualization)

  • OS: Windows 10
  • tensorboard version: 1.12.1
  • tensorflow version: 1.12.0
  • Updated: 2018/12/30
  1. Run the code above to completion.
  2. The generated event file (its name begins with events.out.tfevents) can be found in the logs directory.

  3. Open cmd and cd to the parent folder of the logs directory.

  4. Run the following command (note: do not put quotes around logs):
tensorboard --logdir=logs

  5. In Chrome's address bar, enter http://localhost:6006 and press Enter.

  • Note: the tensorboard process must stay running while you browse http://localhost:6006; exiting it shuts the page down.
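If port 6006 is already in use on your machine, TensorBoard accepts a --port flag (the 6007 below is just an example); then open http://localhost:6007 instead:
tensorboard --logdir=logs --port=6007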

summary usage

  1. tf.summary.scalar()
    • Displays scalar information. Its TensorFlow 1.x signature is:
tf.summary.scalar(name, tensor, collections=None, family=None)

For example: tf.summary.scalar('mean', mean)

Typically used to plot loss or accuracy curves.

  2. tf.summary.histogram()
    • Displays histogram information. Its signature is:
tf.summary.histogram(name, values, collections=None, family=None)

For example: tf.summary.histogram('histogram', var)

Typically used to show how variables are distributed over the course of training.

  3. tf.summary.merge_all()
    • merge_all merges every summary op in the graph into a single op, so that one sess.run evaluates them all for TensorBoard to display (the actual write to disk is done by FileWriter.add_summary). Unless you have special requirements, this one call is enough to record all training information.
    • Format:
tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES)
  4. tf.summary.FileWriter
    • Creates an event file in the given directory and saves the graph (and any added summaries) to it.
    • Format:
tf.summary.FileWriter(path, sess.graph)
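A minimal sketch tying the four calls together (the variable and tag names are illustrative; the full example below does the same at scale):

import tensorflow as tf

x = tf.Variable(0.0, name='x')
tf.summary.scalar('x_value', x)      # scalar curve for the SCALARS tab
tf.summary.histogram('x_hist', x)    # distribution for the HISTOGRAMS tab

merged = tf.summary.merge_all()      # one op that evaluates every summary
with tf.Session() as sess:
    writer = tf.summary.FileWriter("logs/", sess.graph)
    sess.run(tf.global_variables_initializer())
    result = sess.run(merged)
    writer.add_summary(result, 0)    # record the summaries at step 0
    writer.close()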

tensorboard (histograms)

  • Full code
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    # activation_function=None means a plain linear (identity) layer
    # add one more layer and return the output of this layer
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('Weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            tf.summary.histogram('value', Weights)
            # tag = 'value'; value = Weights
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram('value', biases)
            # tag = 'value'; value = biases
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            # keep the linear output Wx_plus_b unchanged
            outputs = Wx_plus_b
        else:
            # apply the activation (here tf.nn.relu) to Wx_plus_b
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram('value', outputs)
        return outputs

# Make up some real data
# build the inputs x_data and the targets y_data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
# add noise so the data looks more like real, scattered measurements
y_data = np.square(x_data) - 0.5 + noise

# define placeholder for inputs to network
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    # any number of rows, one column: one feature per sample (a column vector)
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# add hidden layer
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
# 1 input neuron, 10 hidden neurons (the output layer below has 1 neuron)
# add output layer
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)

# the error between prediction and real data
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
    tf.summary.scalar('value', loss)
    # tag = 'value'; value = loss
# sum the squared errors over each example's columns, then average over all examples
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# gradient descent with learning rate 0.1, minimizing loss
sess = tf.Session()
merged = tf.summary.merge_all()
# merge all summary ops so one run records everything
writer = tf.summary.FileWriter("logs/", sess.graph)
# save the graph to the logs directory
# important step
# tf.initialize_all_variables() is no longer valid
# since 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

for i in range(1000):
    # train for 1000 iterations
    # training
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to see the step improvement
        result = sess.run(merged,
                          feed_dict={xs: x_data, ys: y_data})
        # important: run the merged summary op
        writer.add_summary(result, i)
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
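
As before, run tensorboard --logdir=logs after training finishes. Besides GRAPHS, the SCALARS tab now shows the loss curve and the HISTOGRAMS tab shows the weight, bias, and output distributions, with tags prefixed by their name scopes (e.g. layer1/Weights/value).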