深度学习:TensorFlow 基础操作
tensorflow是当前最流行的深度学习框架,在这里记录和分享一些tf入门的基本姿势。
国际惯例,代码的第一行输出献给“Hello, World!”。
"""
HelloWorld example using TensorFlow.
"""
from __future__ import print_function
import datetime
import tensorflow as tf
print('Start at: ', datetime.datetime.now()) # 开始时间
hello = tf.constant('Hello, World!') # 定义一个常量
sess = tf.Session() # 开始执行tensorflow的会话
# 打印输出
print(sess.run(hello)) # b'Hello, World!'
print('End at: ', datetime.datetime.now()) # 结束时间
基本运算操作
"""
Basic constant operations example using TensorFlow.
"""
from __future__ import print_function
import tensorflow as tf
# Method1:定义常量后执行
# 定义常量
a = tf.constant(3)
b = tf.constant(5)
# 执行默认图操作
with tf.Session() as sess:
print("a=3, b=5")
print("常量相加: %i" % sess.run(a + b)) # 8
print("常量相乘: %i" % sess.run(a * b)) # 15
# Method 2: declare typed placeholders first, feed concrete values at run time.

# Placeholder inputs: only the dtype is fixed here; values arrive via feed_dict.
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

# Ops describing the computation on the placeholders.
add = tf.add(a, b)
mul = tf.multiply(a, b)

# Run the ops on the default graph, supplying the inputs per call.
with tf.Session() as sess:
    print("变量相加: %i" % sess.run(add, feed_dict={a: 3, b: 5}))  # 8
    print("变量相乘: %i" % sess.run(mul, feed_dict={a: 3, b: 5}))  # 15
# Matrix multiplication.

# A 1x2 row vector.
matrix1 = tf.constant([[1., 4.]])
# A 2x1 column vector.
matrix2 = tf.constant([[3.], [2.]])

# (1x2) @ (2x1) -> (1x1): 1*3 + 4*2 = 11.
product = tf.matmul(matrix1, matrix2)

# Evaluate the product and print the single-element matrix.
with tf.Session() as sess:
    result = sess.run(product)
    print(result)  # [[ 11.]]
最最基础的线性回归模型
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy
import tensorflow as tf
rng = numpy.random
# 参数
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# 训练数据
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]
# tf图输入
X = tf.placeholder("float")
Y = tf.placeholder("float")
# 设置模型权重
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# 构造线性模型
pred = tf.add(tf.multiply(X, W), b)
# 均方差(Mean squared error)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
# 梯度下降
# Variable对象的trainable属性值默认为True,minimize()函数会调整W和b
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# 初始化全局的变量
init = tf.global_variables_initializer()
# Launch the graph and train.
with tf.Session() as sess:
    sess.run(init)

    # Stochastic updates: one optimizer step per (x, y) sample, every epoch.
    for epoch in range(1, training_epochs + 1):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Progress log every display_step epochs.
        if epoch % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % epoch, "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Plot the training points and the fitted line.
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    # Held-out test set.
    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])

    print("Testing... (Mean square loss Comparison)")
    # Same loss formula as training, normalized by the test-set size.
    testing_cost = sess.run(
        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
        feed_dict={X: test_X, Y: test_Y})
    print("Testing cost=", testing_cost)
    print("Absolute mean square loss difference:", abs(training_cost - testing_cost))

    # Plot the test points against the same fitted line.
    plt.plot(test_X, test_Y, 'bo', label='Testing data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
基础模型中的逻辑回归模型
"""
Logistic (softmax) regression on MNIST with TensorFlow 1.x.
"""
from __future__ import print_function
import tensorflow as tf

# Load MNIST (downloaded to ./tmp/data/ on first run); labels are one-hot.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)

# Hyper-parameters.
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# Graph inputs.
x = tf.placeholder(tf.float32, [None, 784])  # flattened 28*28 images
y = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels, 10 classes

# Model parameters (zero init is acceptable for a single linear layer).
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Softmax model: per-class probabilities.
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

# Cross-entropy loss. Clip the probabilities away from 0 before the log:
# tf.log(0) is -inf, so the original loss (and its gradients) became NaN
# as soon as any predicted probability underflowed to exactly 0.
cost = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)),
                   reduction_indices=1))

# Plain gradient descent on the cross-entropy.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Op that initializes all global variables.
init = tf.global_variables_initializer()
# Launch the graph and train.
with tf.Session() as sess:
    sess.run(init)

    # One epoch = one full pass over all mini-batches.
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)

        for _ in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # One optimizer step; fetch the batch loss in the same run.
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the running mean of the batch losses.
            avg_cost += c / total_batch

        # Periodic progress log.
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Accuracy: fraction of test images whose argmax prediction matches the label.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
今天整理先到这里,如果还有老铁想要学习人工智能,领取学习资料的可以来加我!!