Testing gradient descent over multiple steps, using ops.apply_gradients to perform the parameter updates

1. ops = tf.train.GradientDescentOptimizer(learning_rate) builds the optimizer

Parameters: learning_rate is the learning rate to use; it can be a Python float or a scalar tensor (the script below passes in a decaying tensor)

2. ops.compute_gradients(loss, tf.trainable_variables(), colocate_gradients_with_ops=True) computes a list of (gradient, variable) pairs

Parameters: loss is the loss tensor to differentiate; tf.trainable_variables() lists the variables whose gradients are needed; colocate_gradients_with_ops=True tries to place each gradient op on the same device as the op it differentiates
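
3. ops.apply_gradients(grads_and_vars, global_step=global_step) applies the computed gradients to the variables; passing global_step increments it once per update

The point of splitting the update into compute_gradients and apply_gradients is that the (gradient, variable) pairs can be inspected or transformed in between. A minimal standalone sketch of that pattern (the toy data, learning rate, and clipping threshold here are illustrative, assuming the TensorFlow 1.x API):

import tensorflow as tf

# Hypothetical example: clip each gradient before applying it.
x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([2.0, 4.0, 6.0])  # y = 2 * x, so the optimal w is 2.0
w = tf.Variable(0.0, name='w')
loss = tf.reduce_mean(tf.square(y - w * x))
global_step = tf.Variable(0, trainable=False)

optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables(),
                                             colocate_gradients_with_ops=True)
# Each entry is a (gradient, variable) pair; transform only the gradient part.
clipped = [(tf.clip_by_value(g, -1.0, 1.0), v)
           for g, v in grads_and_vars if g is not None]
train_op = optimizer.apply_gradients(clipped, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run([w, global_step]))  # w nudged toward 2.0, global_step is 1

The full linear-regression example: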

import tensorflow as tf  # TensorFlow 1.x graph-mode API (tf.train, tf.Session)
import numpy as np
import matplotlib.pyplot as plt

TRAIN_STEP = 20

# Step 1: generate synthetic data around the line y = 0.1 * x + 0.3
data = []
num_data = 1000
for i in range(num_data):
    x_data = np.random.normal(0.0, 0.55)
    y_data = 0.1 * x_data + 0.3 + np.random.normal(0.0, 0.03)
    data.append([x_data, y_data])

# Step 2: split the data into features and labels
X_data = [v[0] for v in data]
y_data = [v[1] for v in data]

w = tf.Variable(tf.truncated_normal([1], mean=-1.0, stddev=1.0), name='w')
b = tf.Variable(tf.zeros([1]), name='b')
initial_learning_rate = 0.5
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, 15, 0.5, staircase=True)  # decay the learning rate as global_step grows
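# With staircase=True, the decayed rate is
#   initial_learning_rate * 0.5 ** (global_step // 15)
# so steps 0-14 train at 0.5 and steps 15-29 at 0.25 (this script runs 20 steps).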

optimizer = tf.train.GradientDescentOptimizer(learning_rate)
logits = X_data * w + b
loss = tf.reduce_mean(tf.square(y_data - logits))
gradients = optimizer.compute_gradients(loss, tf.trainable_variables(), colocate_gradients_with_ops=True)
grad_op = optimizer.apply_gradients(gradients, global_step=global_step)  # also increments global_step once per run
# UPDATE_OPS is empty in this graph; the pattern matters for layers such as
# batch normalization that register extra update ops.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.group(grad_op)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(TRAIN_STEP):
    print('step:', i)
    # print('w:', sess.run(w), 'b:', sess.run(b))
    sess.run(train_op)
    print(sess.run(learning_rate))

print(sess.run(global_step))
print(sess.run(w), sess.run(b))
plt.plot(X_data, y_data, '+')
plt.plot(X_data, sess.run(w) * X_data + sess.run(b))
plt.show()
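
For intuition, the update that apply_gradients performs here can be reproduced by hand. For the loss mean((y - (w*x + b))^2), the gradients are dL/dw = -2 * mean(x * (y - (w*x + b))) and dL/db = -2 * mean(y - (w*x + b)), and each step moves the parameters against them. A standalone NumPy re-implementation of the same descent (illustrative, not part of the original script):

import numpy as np

# Regenerate data as above: y = 0.1 * x + 0.3 + noise.
x = np.random.normal(0.0, 0.55, 1000)
y = 0.1 * x + 0.3 + np.random.normal(0.0, 0.03, 1000)

w, b = -1.0, 0.0
for step in range(20):
    lr = 0.5 * 0.5 ** (step // 15)    # the same staircase decay as above
    err = y - (w * x + b)
    grad_w = -2.0 * np.mean(x * err)  # dL/dw
    grad_b = -2.0 * np.mean(err)      # dL/db
    w -= lr * grad_w                  # the step apply_gradients performs
    b -= lr * grad_b

print(w, b)  # should land near 0.1 and 0.3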