课程链接:https://www.bilibili.com/video/BV1Ct411H7rm?p=12&t=3
目录
01线性回归
import tensorflow.keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: a model built as a linear stack of layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Generate 100 random points sampled around the line y = 0.1*x + 0.2
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)
# FIX: `noise` was generated but never used; add it so y_data is a noisy
# observation of the underlying line
y_data = x_data * 0.1 + 0.2 + noise

# Build a sequential model
model = Sequential()
# Add one fully-connected layer:
# units=1    -> a single neuron producing one output value
# input_dim=1 -> each sample is a single scalar x
model.add(Dense(units=1, input_dim=1))
# Compile the model: choose optimizer and loss function
# sgd = stochastic gradient descent; mse = mean squared error
model.compile(optimizer='sgd', loss='mse')

# Train for a fixed number of steps (FIX: the loop body was not indented)
for step in range(5001):
    cost = model.train_on_batch(x_data, y_data)
    if step % 500 == 0:
        print('cost:', cost)

# Print the learned weight and bias (should approach 0.1 and 0.2)
W, b = model.layers[0].get_weights()
print("W:", W)
print("b:", b)

# Feed x_data through the network to get the predictions y_pred
y_pred = model.predict(x_data)
print("y_pred", y_pred)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r-', lw=3)
plt.show()
02非线性回归
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.optimizers import SGD

# Generate 200 evenly spaced points in [-0.5, 0.5] sampled from
# y = x^2 + Gaussian noise
x_data = np.linspace(-0.5, 0.5, 200)
noise = np.random.normal(0, 0.2, x_data.shape)
y_data = np.square(x_data) + noise
# plt.scatter(x_data,y_data)
# plt.show()

# Build a sequential model: 1 input -> 10 hidden (tanh) -> 1 output (tanh)
model = Sequential()
# units = number of neurons in the layer; input_dim = dimensionality of x
# (activation='tanh' replaces a separate Activation('tanh') layer)
model.add(Dense(units=10, input_dim=1, activation='tanh'))
model.add(Dense(units=1, activation='tanh'))

# Importing SGD lets us configure the optimizer ourselves
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
sgd = SGD(learning_rate=0.3)
# Compile: sgd = stochastic gradient descent; mse = mean squared error
model.compile(optimizer=sgd, loss='mse')

# Train for a fixed number of steps (FIX: the loop body was not indented)
for step in range(10001):
    cost = model.train_on_batch(x_data, y_data)
    if step % 500 == 0:
        print('cost:', cost)

# Print the first layer's weights and biases
W, b = model.layers[0].get_weights()
print("W:", W)
print("b:", b)

# Feed x_data through the network to get the predictions y_pred
y_pred = model.predict(x_data)
print("y_pred", y_pred)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r-', lw=3)
plt.show()
03mnist数据集分类
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import SGD

# Load MNIST:
# x_train (60000, 28, 28), y_train (60000,)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("x_shape", x_train.shape)
print("y_shape", y_train.shape)
# Flatten images (60000, 28, 28) -> (60000, 784) and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255
x_test = x_test.reshape(x_test.shape[0], -1) / 255
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Single-layer softmax classifier: 784 inputs -> 10 outputs
# ('ones' is the canonical name of the all-ones bias initializer)
model = Sequential()
model.add(Dense(units=10, input_dim=784, bias_initializer='ones', activation='softmax'))

# Define the optimizer
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
sgd = SGD(learning_rate=0.2)
# Compute the loss and also track accuracy during training
# NOTE(review): mse is an odd loss for classification; the next section
# switches to categorical_crossentropy — kept here to match the lesson.
model.compile(optimizer=sgd,
              loss='mse',
              metrics=['accuracy'])

# Train: one epoch = one pass over all 60000 images, repeated 10 times;
# batch_size=32 -> 32 images per gradient update
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('\ntest accuracy', accuracy)
04 交叉熵和Dropout
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import SGD

# Load MNIST:
# x_train (60000, 28, 28), y_train (60000,)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("x_shape", x_train.shape)
print("y_shape", y_train.shape)
# Flatten images (60000, 28, 28) -> (60000, 784) and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255
x_test = x_test.reshape(x_test.shape[0], -1) / 255
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# 784 -> 200 -> 100 -> 10 fully-connected network with dropout
# ('ones' is the canonical name of the all-ones bias initializer)
model = Sequential()
model.add(Dense(units=200, input_dim=784, bias_initializer='ones', activation='tanh'))
model.add(Dropout(0.2))  # dropout does not necessarily improve test accuracy
model.add(Dense(units=100, bias_initializer='ones', activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(units=10, bias_initializer='ones', activation='softmax'))

# Define the optimizer
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
sgd = SGD(learning_rate=0.2)
# Cross-entropy loss; also track accuracy during training
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train: one epoch = one pass over all 60000 images, repeated 10 times;
# batch_size=32 -> 32 images per gradient update
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on both test and training data (to compare for overfitting)
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('\ntest accuracy', accuracy)
loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('\ntrain accuracy', accuracy)
06正则化+07优化器
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.regularizers import l2

# Load MNIST:
# x_train (60000, 28, 28), y_train (60000,)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("x_shape", x_train.shape)
print("y_shape", y_train.shape)
# Flatten images (60000, 28, 28) -> (60000, 784) and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255
x_test = x_test.reshape(x_test.shape[0], -1) / 255
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# 784 -> 200 -> 100 -> 10 network with L2 weight regularization on each layer
# ('ones' is the canonical name of the all-ones bias initializer)
model = Sequential()
model.add(Dense(units=200, input_dim=784, bias_initializer='ones', activation='tanh', kernel_regularizer=l2(0.0003)))
model.add(Dense(units=100, bias_initializer='ones', activation='tanh', kernel_regularizer=l2(0.0003)))
model.add(Dense(units=10, bias_initializer='ones', activation='softmax', kernel_regularizer=l2(0.0003)))

# Define the optimizer
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
sgd = SGD(learning_rate=0.2)
# Cross-entropy loss; also track accuracy during training
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train: one epoch = one pass over all 60000 images, repeated 10 times;
# batch_size=32 -> 32 images per gradient update
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on both test and training data (to compare for overfitting)
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('\ntest accuracy', accuracy)
loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('\ntrain accuracy', accuracy)
08CNN手写数字识别
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load MNIST, reshape to (N, 28, 28, 1) and scale pixels to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Define the sequential model
model = Sequential()
# First convolutional layer
# input_shape: input plane
# filters:     number of convolution kernels / filters
# kernel_size: convolution window size, here 5x5
# strides:     step size
# padding:     'same' keeps the spatial size, 'valid' does not
# activation:  activation function
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu'
))
# First pooling layer; the first conv layer outputs 32 maps of 28x28
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same'
))
# First pooling output is 32x14x14; second convolutional layer
model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu'))
# Second conv output is 64x14x14; second pooling layer
model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))
# Second pooling output is 64x7x7; flatten to a 1-D vector of length 64*7*7
model.add(Flatten())
# First fully-connected layer
model.add(Dense(1024, activation='relu'))
# Dropout layer
model.add(Dropout(0.5))
# Second fully-connected (output) layer
model.add(Dense(10, activation='softmax'))

# Compile the model: optimizer, loss function, accuracy metric
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
adam = Adam(learning_rate=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)
09RNN应用
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# FIX: SimpleRNN lives in tensorflow.keras.layers; the original
# `from tensorflow.keras.recurrent import SimpleRNN` raises ImportError
from tensorflow.keras.layers import SimpleRNN
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model  # load a previously saved model

# Treat each image row as one time step: 28 steps of 28 pixels each
input_size = 28   # pixels per step (one image row)
time_steps = 28   # number of steps (number of image rows)
cell_size = 50    # number of hidden RNN units

# Load MNIST; (60000, 28, 28) already matches (samples, time_steps, input_size)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Load the saved model (see model.save below) ...
model = load_model('model.h5')
# ... and keep training it where it left off
model.fit(x_train, y_train, batch_size=64, epochs=2)
# How the model was originally built and trained:
# model = Sequential()
# model.add(SimpleRNN(
#     units=cell_size,                        # output size
#     input_shape=(time_steps, input_size),   # input size
# ))
# model.add(Dense(10, activation='softmax'))  # output layer
# adam = Adam(learning_rate=1e-4)             # define the optimizer
# model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=64, epochs=10)

loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)

# Save the whole model (HDF5 file; requires `pip install h5py`)
model.save('model.h5')
# Save / reload only the weights
model.save_weights('my_model_weights.h5')
model.load_weights('my_model_weights.h5')
# Save / reload only the network architecture (as JSON)
from tensorflow.keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
print(json_string)
10绘制网络结构
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# FIX: plot_model is exported from tensorflow.keras.utils; the original
# `tensorflow.keras.utils.vis_utils` path raises ImportError
from tensorflow.keras.utils import plot_model
# Requires pydot and graphviz to be installed
# (graphviz may have to be downloaded manually)

# Load MNIST, reshape to (N, 28, 28, 1) and scale pixels to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Define the sequential model
model = Sequential()
# First convolutional layer
# input_shape: input plane
# filters:     number of convolution kernels / filters
# kernel_size: convolution window size, here 5x5
# strides:     step size
# padding:     'same' keeps the spatial size, 'valid' does not
# activation:  activation function
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu',
    name='conv1'  # shown in the plotted graph when show_layer_names=True
))
# First pooling layer; the first conv layer outputs 32 maps of 28x28
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
    name='pool1'  # shown in the plotted graph when show_layer_names=True
))
# First pooling output is 32x14x14; second convolutional layer
model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu'))
# Second conv output is 64x14x14; second pooling layer
model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))
# Second pooling output is 64x7x7; flatten to a 1-D vector of length 64*7*7
model.add(Flatten())
# First fully-connected layer
model.add(Dense(1024, activation='relu'))
# Dropout layer
model.add(Dropout(0.5))
# Second fully-connected (output) layer
model.add(Dense(10, activation='softmax'))

# Compile the model: optimizer, loss function, accuracy metric
# FIX: `lr` is deprecated (removed in Keras 3); use `learning_rate`
adam = Adam(learning_rate=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)

# Draw the network structure; rankdir='TB' = top-to-bottom, 'LR' = left-to-right
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=False, rankdir='TB')
plt.figure(figsize=(10, 10))
img = plt.imread("model.png")
plt.imshow(img)
plt.axis('off')  # FIX: plt.axes('off') is the wrong API for hiding axes
plt.show()