Single-hidden-layer neural network demo
A neural network performs feature extraction: compared with logistic regression, it adds a hidden layer and an activation function.
Here the input dimension is set to 784 (a flattened 28x28 image) and the output has 10 classes.
Hidden layer: recombines and transforms the input features, mapping the 784 raw features into higher-level features with stronger discriminative power. Here the hidden layer is set to 50 units.
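The forward pass therefore follows the shape flow 784 → 50 → 10. Below is a minimal NumPy sketch of that computation, for illustration only (the actual model is built in TensorFlow in the following sections; the random weights here are just placeholders):
import numpy as np
x = np.random.rand(1, 784)             # one flattened 28x28 image
W1 = np.random.randn(784, 50) * 0.1    # input -> hidden weights
b1 = np.zeros(50)
W2 = np.random.randn(50, 10) * 0.1     # hidden -> output weights
b2 = np.zeros(10)
hidden = np.maximum(x.dot(W1) + b1, 0) # ReLU(x*W1 + b1), shape (1, 50)
logits = hidden.dot(W2) + b2           # one score per class, shape (1, 10)
print(hidden.shape, logits.shape)      # (1, 50) (1, 10)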
1. Import packages
# The MNIST dataset loader is provided by TensorFlow and can be called directly
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# one_hot=True means the labels are one-hot encoded, e.g. label 4 becomes [0,0,0,0,1,0,0,0,0,0]
mnist = input_data.read_data_sets('data/',one_hot=True)
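As a quick sanity check of the one-hot encoding and the loaded data (a small sketch; it assumes NumPy and the standard 55000-image training split used by input_data):
import numpy as np
one_hot = np.zeros(10)
one_hot[4] = 1                    # label 4 as a one-hot vector
print(one_hot)                    # [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
print(mnist.train.images.shape)   # (55000, 784) flattened 28x28 images
print(mnist.train.labels.shape)   # (55000, 10) one-hot labels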
2. Parameter settings
numClass = 10
inputSize = 784
numHiddenUnits = 50 # 50 units in the hidden layer
trainingIterations = 10000
batchSize = 100 # each training step draws batchSize samples from the training set
X = tf.placeholder(tf.float32,shape = [None,inputSize])
y = tf.placeholder(tf.float32,shape = [None,numClass])
3. Parameter initialization
W1 = tf.Variable(tf.truncated_normal([inputSize,numHiddenUnits],stddev=0.1))
B1 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnits]))
W2 = tf.Variable(tf.truncated_normal([numHiddenUnits,numClass],stddev=0.1))
B2 = tf.Variable(tf.constant(0.1, shape=[numClass]))
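A quick shape check of the variables defined above (illustrative; it only prints the static shapes, so it can run before a session is created):
print(W1.shape, B1.shape)   # (784, 50) (50,)
print(W2.shape, B2.shape)   # (50, 10) (10,)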
4. Network structure
hiddenLayerOutput = tf.matmul(X,W1) + B1 # hidden layer
hiddenLayerOutput = tf.nn.relu(hiddenLayerOutput) # ReLU activation
finalOutput = tf.matmul(hiddenLayerOutput,W2) + B2 # output layer: class scores (logits)
finalOutput = tf.nn.relu(finalOutput) # ReLU on the output (softmax_cross_entropy_with_logits below applies softmax itself, so raw logits would also work here)
5. Loss and optimizer
# Loss: tf.nn.softmax_cross_entropy_with_logits computes the softmax cross-entropy, where labels are the ground-truth one-hot labels and logits are the network's predictions
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y,logits = finalOutput))
# specify the optimizer (gradient descent) and minimize the loss
opt = tf.train.GradientDescentOptimizer(learning_rate = .1).minimize(loss)
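To make the loss concrete, here is what softmax cross-entropy computes for a single sample, written out in NumPy (illustrative only; tf.nn.softmax_cross_entropy_with_logits does the same thing in a numerically stable way):
import numpy as np
logits = np.array([2.0, 1.0, 0.1])
label = np.array([1.0, 0.0, 0.0])               # one-hot ground truth
probs = np.exp(logits) / np.sum(np.exp(logits)) # softmax
loss_value = -np.sum(label * np.log(probs))     # cross-entropy
print(probs, loss_value)                        # ~[0.66 0.24 0.10] ~0.417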
6. Compute accuracy from the current predictions
correct_prediction = tf.equal(tf.argmax(finalOutput,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float32"))
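These two ops compare the predicted class (argmax of the logits) with the true class (argmax of the one-hot label) and average the result; a small NumPy equivalent for illustration:
import numpy as np
preds = np.array([[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]])  # two predictions
labels = np.array([[0, 1, 0], [0, 0, 1]])             # two one-hot labels
correct = np.argmax(preds, 1) == np.argmax(labels, 1) # [True, False]
print(correct.mean())                                 # 0.5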
7. Run the graph
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in range(trainingIterations):
    batch = mnist.train.next_batch(batchSize)  # next mini-batch of images and labels
    batchInput = batch[0]
    batchLabels = batch[1]
    _,trainLoss = sess.run([opt,loss],feed_dict={X:batchInput,y:batchLabels})  # one optimization step
    if i%1000 == 0:
        trainAccuracy = accuracy.eval(session=sess,feed_dict={X:batchInput,y:batchLabels})  # accuracy on the current batch
        print("step %d , training accuracy %g"%(i,trainAccuracy))
Result: