TensorFlow 2 变为动态图优先模式,可以在计算时同时获得计算图与数值结果,便于在代码调试时实时打印数据。
核心功能:GPU并行加速计算,自动梯度(tape.gradient),常用神经网络接口(网络运算函数、常用网络层、模型保存加载等)
梯度下降法 线性模型实战(未使用tf,手动求解梯度并更新)
# This is a sample Python script.
import numpy as np
# 均方误差
# 均方误差
def mse(b, w, points):
    """Mean squared error of the linear model y_hat = w*x + b over all points.

    Args:
        b: intercept of the linear model.
        w: slope of the linear model.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.

    Returns:
        float: mean of (y - (w*x + b))**2 over the N points.
    """
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    # Vectorized residuals replace the original per-point Python loop;
    # numerically equivalent, runs at C speed.
    residuals = y - (w * x + b)
    return float(np.mean(residuals ** 2))
# 计算梯度
# 计算梯度
def step_gradient(b_current, w_current, points, lr):
    """Perform one gradient-descent update of (b, w) for the MSE loss.

    Gradients of mean((w*x + b - y)^2) over M points:
        dL/db = (2/M) * sum(w*x + b - y)
        dL/dw = (2/M) * sum(x * (w*x + b - y))

    Args:
        b_current: current intercept.
        w_current: current slope.
        points: array-like of shape (M, 2); column 0 is x, column 1 is y.
        lr: learning rate (step size).

    Returns:
        [new_b, new_w]: parameters after one update step  b' = b - lr * dL/db.
    """
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    m = float(len(pts))
    # Prediction error for every point at once (vectorized; the original
    # accumulated the same sums in a Python loop, and misspelled the
    # locals as 'b_grandient'/'w_grandient').
    err = (w_current * x + b_current) - y
    b_gradient = (2.0 / m) * np.sum(err)
    w_gradient = (2.0 / m) * np.sum(x * err)
    # 更新系数 b' = b - lr * gradient
    new_b = b_current - lr * b_gradient
    new_w = w_current - lr * w_gradient
    return [new_b, new_w]
def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    """Fit the linear model y = w*x + b by iterated gradient descent.

    Args:
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        starting_b: initial intercept.
        starting_w: initial slope.
        lr: learning rate passed to step_gradient.
        num_iterations: number of update steps to run.

    Returns:
        [b, w]: fitted parameters after num_iterations steps.
    """
    # Convert once, outside the loop — the original called np.array(points)
    # on every iteration.
    pts = np.array(points)
    b, w = starting_b, starting_w
    for step in range(num_iterations):  # 迭代次数
        b, w = step_gradient(b, w, pts, lr)
        # Loss is only reported every 50 steps, so only compute it then
        # (the original computed it every iteration and discarded it).
        if step % 50 == 0:
            loss = mse(b, w, pts)
            print(f'iteration:{step},loss:{loss},w:{w},b:{b}')
    return [b, w]
def main():
    """Generate noisy samples of y = 1.477*x + 0.089 and recover (w, b) by gradient descent."""
    n_samples = 100
    # Vectorized sampling replaces the original per-point append loop;
    # same distributions, one NumPy call each instead of 100 Python iterations.
    x = np.random.uniform(-10., 10., size=n_samples)
    eps = np.random.normal(0., 0.01, size=n_samples)  # small Gaussian observation noise
    y = 1.477 * x + 0.089 + eps
    data = np.stack([x, y], axis=1)  # shape (100, 2): column 0 is x, column 1 is y

    lr = 0.01
    initial_b = 0
    initial_w = 0
    num_iterations = 1000
    b, w = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print(f'Final loss:{loss},w:{w},b:{b}')
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()