Adapted from: Implementing a Recurrent Neural Network (RNN) in TensorFlow
# Run the TF1-style graph/session API on top of a TF2 installation
import tensorflow.compat.v1 as tf
import tensorflow as tf2  # plain TF2 namespace, only used in a commented-out alternative below
tf.disable_v2_behavior()
import numpy as np
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt
# Hyperparameters
BATCH_SIZE = 100
N_STEPS = 28          # time steps: one image row per step
N_INPUTS = 28         # inputs per step: 28 pixels in each row
N_CLASSES = 10        # digits 0-9
N_HIDDEN_UNITS = 128  # size of the LSTM hidden state
LR = 1e-3             # learning rate
TRAINING_ITERS = 1e5  # total number of training examples to consume
# Data: treat each 28x28 MNIST image as a sequence of 28 rows
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0  # scale pixels to [0, 1]
x_test = x_test / 255.0
y_train = np.eye(10)[y_train]  # one-hot encode labels by indexing the identity matrix
y_test = np.eye(10)[y_test]
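# The indexing trick above selects one row of the 10x10 identity matrix per
# label, e.g. np.eye(10)[3] -> [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
# so y_train ends up with shape (60000, 10).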
# Graph inputs
x = tf.placeholder(tf.float32, [None, N_STEPS, N_INPUTS])  # batch of row sequences
y = tf.placeholder(tf.float32, [None, N_CLASSES])          # one-hot labels
# Output-layer parameters: project the final hidden state onto the 10 classes
W = tf.Variable(tf.random_normal([N_HIDDEN_UNITS, N_CLASSES]))
b = tf.Variable(tf.constant(.1, shape=[N_CLASSES]))
def RNN(X, weights, biases):
    #################################################
    # Cell
    # state_is_tuple=True: the LSTM state is returned as a (c, h) tuple
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(N_HIDDEN_UNITS, forget_bias=1.0, state_is_tuple=True)
    # _init_state = lstm_cell.zero_state(BATCH_SIZE, dtype=tf.float32)
    # outputs, states = tf.nn.dynamic_rnn(lstm_cell, X, initial_state=_init_state)
    # dynamic_rnn needs either dtype or initial_state. With initial_state as
    # above, evaluating on the test set kept failing because the graph then
    # demands exactly BATCH_SIZE examples; passing dtype instead avoids this.
    outputs, states = tf.nn.dynamic_rnn(lstm_cell, X, dtype=tf.float32)
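    # If an explicit initial state is needed, the BATCH_SIZE coupling can be
    # avoided by taking the batch size from the input tensor at run time
    # (a sketch, not wired in here; tf.shape(X)[0] is the dynamic batch size):
    # _init_state = lstm_cell.zero_state(tf.shape(X)[0], dtype=tf.float32)
    # outputs, states = tf.nn.dynamic_rnn(lstm_cell, X, initial_state=_init_state)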
    #################################################
    # Output layer: map the final hidden state to class scores
    # states is an LSTMStateTuple (c, h), so states[1] is the last hidden state h
    results = tf.matmul(states[1], weights) + biases
    return results
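# Inside RNN, the readout could equivalently use the outputs tensor: for a
# single LSTM layer, outputs[:, -1, :] (the last time step) equals states.h,
# so the following would produce the same logits (shown for comparison only):
#     results = tf.matmul(outputs[:, -1, :], weights) + biases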
pred = RNN(x, W, b)
# Note: pass logits and labels by keyword; otherwise the call errors out, or
# the two arguments are easy to swap by position
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
# softmax_cross_entropy_with_logits: applies softmax to the logits, then
# computes the cross-entropy against the one-hot labels
# cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf2.nn.softmax(pred)), reduction_indices=1))
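# The fused op is preferred over the manual form above because it is
# numerically stable for large-magnitude logits. Per example it computes
#     loss = -sum_c y_c * log(softmax(pred)_c)
# e.g. pred = [2., 0., 0.] with y = [1., 0., 0.] gives a loss of about 0.24.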
train_op = tf.train.AdamOptimizer(LR).minimize(cost)
# argmax over the raw logits is fine here: softmax is monotonic, so it selects
# the same class as argmax over the probabilities
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
results = []  # test-accuracy history, one entry per evaluation
with tf.Session() as se:
    se.run(tf.global_variables_initializer())
    sep_ = 1000  # evaluate every sep_ training examples
    # step counts training examples consumed, not batches
    for step in range(0, int(TRAINING_ITERS), BATCH_SIZE):
        # sample a random batch (without replacement within the batch)
        random_index = np.random.choice(x_train.shape[0], BATCH_SIZE, replace=False)
        batch_xs, batch_ys = x_train[random_index], y_train[random_index]
        se.run(train_op, feed_dict={x: batch_xs, y: batch_ys})
        if step % sep_ == 0:
            acc = se.run(accuracy, feed_dict={x: x_test, y: y_test})
            print(step, acc)
            results.append(acc)
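# If the test set ever becomes too large to feed in one go, the accuracy can
# be averaged over test mini-batches instead (a sketch; exact here only
# because the 10000 test examples divide evenly into batches of BATCH_SIZE):
#     accs = [se.run(accuracy, feed_dict={x: x_test[i:i + BATCH_SIZE],
#                                         y: y_test[i:i + BATCH_SIZE]})
#             for i in range(0, len(x_test), BATCH_SIZE)]
#     acc = float(np.mean(accs))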
# Plot test accuracy against the number of training examples seen
plt.plot([sep_ * i for i in range(len(results))], results)
ax = plt.gca()
ax.yaxis.set_major_locator(plt.MultipleLocator(.1))  # y ticks every 0.1
plt.ylim(0, 1)
plt.show()
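# For reference, a rough TF2/Keras equivalent of the same model (a sketch,
# not run here: it needs its own script, since disable_v2_behavior() is
# active in this one, and the exact accuracy curve will differ):
# model = tf2.keras.Sequential([
#     tf2.keras.layers.LSTM(N_HIDDEN_UNITS, input_shape=(N_STEPS, N_INPUTS)),
#     tf2.keras.layers.Dense(N_CLASSES, activation='softmax'),
# ])
# model.compile(optimizer=tf2.keras.optimizers.Adam(LR),
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=2,
#           validation_data=(x_test, y_test))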