TensorFlow学习记录(四)---TensorFlow自动求导机制

一元函数

import tensorflow as tf

x = tf.Variable(3.)
# persistent=True lets gradient() be called more than once on the same tape.
with tf.GradientTape(persistent=True) as tape:
    y = tf.square(x)
    z = tf.pow(x, 3)
dy_dx = tape.gradient(y, x)  # d(x^2)/dx = 2x
dz_dx = tape.gradient(z, x)  # d(x^3)/dx = 3x^2
for result in (y, dy_dx, z, dz_dx):
    print(result)
del tape  # a persistent tape must be released manually when done

多元函数

import tensorflow as tf

x = tf.Variable(3.)
y = tf.Variable(4.)
# persistent=True lets gradient() be called more than once on the same tape.
with tf.GradientTape(persistent=True) as tape:
    f = tf.square(x) + 2 * tf.square(y) + 1
# Passing a list of variables returns one gradient per variable, in order.
# Fixed: the first gradient is df/dx, not "dy_dx" as originally named.
df_dx, df_dy = tape.gradient(f, [x, y])

print(f)
print(df_dx)  # df/dx = 2x = 6
print(df_dy)  # df/dy = 4y = 16
del tape  # a persistent tape must be released manually when done

二阶导数

import tensorflow as tf

x = tf.Variable(3.)
y = tf.Variable(4.)
# Nest two tapes: the outer tape records the gradient computation done by
# the inner tape, which makes second-order derivatives possible.
with tf.GradientTape(persistent=True) as tape2:
    with tf.GradientTape(persistent=True) as tape1:
        f = tf.square(x) + 2 * tf.square(y) + 1
    # Fixed typo: "firsy_grads" -> "first_grads".
    first_grads = tape1.gradient(f, [x, y])
# NOTE: differentiating a list of tensors sums them first, so each entry of
# second_grads is d(df/dx + df/dy)/d(variable).
second_grads = tape2.gradient(first_grads, [x, y])

print(f)
print(first_grads)
print(second_grads)
del tape1
del tape2  # persistent tapes must be released manually when done

TensorFlow实现一元线性回归

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Training data (e.g. area vs. price); both arrays are float64, so the
# tf.Variable parameters below (created from numpy float64 scalars) match.
x = np.array([137.97, 104.50, 100.00, 124.32, 79.20, 99.00, 124.00, 114.00,
              106.69, 138.05, 53.75, 46.91, 68.00, 63.02, 81.26, 86.21])
y = np.array([145.00, 110.00, 93.00, 116.00, 65.32, 104.00, 118.00, 91.00,
              62.00, 133.00, 51.00, 45.00, 78.50, 69.65, 75.69, 95.30])

# Hyperparameters.
learn_rate = 0.0001
num_epochs = 10      # renamed from `iter`, which shadowed the builtin
display_step = 1

# Reproducible random initialization of the model parameters.
np.random.seed(612)
w = tf.Variable(np.random.randn())
b = tf.Variable(np.random.randn())

# Train y ≈ w*x + b with plain gradient descent.
mse = []
for i in range(num_epochs + 1):
    with tf.GradientTape() as tape:
        pred = w * x + b
        # Half mean-squared-error; the 0.5 cancels the 2 in the gradient.
        Loss = 0.5 * tf.reduce_mean(tf.square(y - pred))
    mse.append(Loss)
    dL_dw, dL_db = tape.gradient(Loss, [w, b])
    w.assign_sub(learn_rate * dL_dw)  # w -= lr * dL/dw
    b.assign_sub(learn_rate * dL_db)  # b -= lr * dL/db
    if i % display_step == 0:
        print("i:%i,Loss: %f,w:%f,b:%f" % (i, Loss, w.numpy(), b.numpy()))

上一篇:TensorFlow学习记录(九)---人工神经网络经典框架


下一篇:Windows下安装centOS7双系统总结