Linear Regression with TensorFlow

Just getting started with TensorFlow.

Without further ado, here is the code.

The code is copied from elsewhere; the comments are my own understanding. Feedback and discussion are welcome. Note that it uses the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.contrib), so it will not run unmodified on TensorFlow 2.x.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def normalize(X):
    """Normalize the array to zero mean and unit variance."""
    mean = np.mean(X)
    std = np.std(X)
    X = (X - mean) / std
    return X
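# Note: normalize() is defined above but never called in the original code.
# If you wanted standardized inputs, one option (not in the original) would be
# to apply it right after loading the data, e.g. X_train = normalize(X_train).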

boston = tf.contrib.learn.datasets.load_dataset('boston')  # load the Boston housing dataset (TF 1.x contrib API)
#   reference: http://c.biancheng.net/view/1906.html
X_train, Y_train = boston.data[:, 5], boston.target  # feature 5 (RM, average rooms per dwelling) as x, house price as y
n_samples = len(X_train)  # number of training samples
X = tf.placeholder(tf.float32, name='X')  # input placeholder; the model is y = w * x + b
Y = tf.placeholder(tf.float32, name='Y')  # target placeholder
b = tf.Variable(0.0)  # trainable bias, initialized to 0
w = tf.Variable(0.0)  # trainable weight, initialized to 0
# end of model definition
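# In TF 1.x graph mode, placeholders are inputs whose values are supplied through
# feed_dict at run time, while tf.Variable objects hold the trainable parameters
# (w and b here) that the optimizer updates during training.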

Y_hat = X * w + b
loss = tf.square(Y - Y_hat, name='loss')  # squared-error loss for a single sample
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)  # gradient-descent step that minimizes the loss; this is the core of training
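# minimize() builds ops that compute the gradients of the loss and apply one
# gradient-descent update. For loss = (y - (w*x + b))^2 the gradients are
# d(loss)/dw = -2 * (y - y_hat) * x and d(loss)/db = -2 * (y - y_hat),
# so each step does w -= 0.01 * d(loss)/dw and b -= 0.01 * d(loss)/db.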
init_op = tf.global_variables_initializer()  # op that initializes all variables
total = []  # records the average loss per epoch
with tf.Session() as sess:  # with-block so the session is closed automatically
    sess.run(init_op)  # run the variable initializer
    writer = tf.summary.FileWriter('graph', sess.graph)  # write the graph so it can be inspected in TensorBoard: run `tensorboard --logdir=graph` ('graph' is the directory passed to FileWriter) and open the URL it prints
    for i in range(100):
        total_loss = 0
        for x, y in zip(X_train, Y_train):
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})  # one optimizer step; also fetch the loss value
            total_loss += l  # accumulate the loss (the variable is a lowercase L)
        total.append(total_loss / n_samples)  # average loss for this epoch
        print('Epoch {0}: Loss {1}'.format(i, total_loss / n_samples))
    writer.close()
    b_value, w_value = sess.run([b, w])
Y_pred = X_train * w_value + b_value
print('Done')
plt.plot(X_train, Y_train, 'bo',label='Real data')
plt.plot(X_train, Y_pred, 'r', label='Predicted Data')
plt.legend()
plt.show()
plt.plot(total)  # plot the average loss per epoch
plt.show()
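
As a quick sanity check (not part of the original post), you can compare the w and b learned by gradient descent with the closed-form least-squares fit from NumPy's polyfit. The snippet below reuses X_train, Y_train, w_value, and b_value from the script above; if gradient descent has converged, the two fits should roughly agree.

import numpy as np

# closed-form least-squares fit of y = w * x + b (degree-1 polynomial: slope, intercept)
w_ls, b_ls = np.polyfit(X_train, Y_train, 1)
print('Gradient descent: w = {0:.4f}, b = {1:.4f}'.format(w_value, b_value))
print('Least squares   : w = {0:.4f}, b = {1:.4f}'.format(w_ls, b_ls))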

 
