tensorflow实现简单线性回归

import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn import datasets
# --- Data and graph construction: fit y = A*x + b on the iris dataset ---
sess = tf.Session()

# Features: petal width (column 3); targets: sepal length (column 0).
iris = datasets.load_iris()
x_val = np.array([x[3] for x in iris.data])
y_val = np.array([y[0] for y in iris.data])

# Hyperparameters.
batch_size = 25
learning_rate = 0.1
iterations = 50

# Placeholders fed with mini-batches of shape (batch, 1).
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Model parameters: slope A and intercept b, randomly initialized.
A = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
model_output = tf.add(tf.multiply(x_data, A), b)

# L1 loss (mean absolute error).  BUGFIX: the original used tf.square here,
# which computes the L2 (squared) loss and contradicted every "l1" name and
# the "L1 Loss" plot labels; tf.abs makes the computation match its labels.
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))

my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)

# Initialize variables only after the full graph (optimizer included) is
# built, so any optimizer-created variables are covered by the initializer.
init = tf.global_variables_initializer()
sess.run(init)

loss_vec_l1 = []
# Mini-batch training: sample a random batch, take one gradient step,
# record the batch loss, and periodically report the fitted parameters.
for step in range(iterations):
    batch_idx = np.random.choice(len(x_val), size=batch_size)
    batch_x = np.transpose([x_val[batch_idx]])
    batch_y = np.transpose([y_val[batch_idx]])

    feed = {x_data: batch_x, y_target: batch_y}
    sess.run(train_step_l1, feed_dict=feed)
    # Loss is re-evaluated on the same batch after the update.
    loss_vec_l1.append(sess.run(loss_l1, feed_dict=feed))

    if (step + 1) % 25 == 0:
        print("step#" + str(step + 1) + "A =" + str(sess.run(A)) + "b=" + str(sess.run(b)))
# Visualize the training curve: per-batch L1 loss versus iteration.
ax = plt.gca()
ax.plot(loss_vec_l1, "r", label="L1 Loss")
ax.set_title("L1 loss")
ax.set_xlabel("iterations")
ax.set_ylabel("loss")
ax.legend()

plt.show()

结果展示

step#25A =[[ 1.80136609]]b=[[ 3.35964632]]
step#50A =[[ 1.2254194]]b=[[ 4.35343599]]
tensorflow实现简单线性回归