First-degree, one-variable fit: f(x) = wx + b
Using the ready-made linear regression fitter
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
X = np.linspace(2, 10, 20).reshape(-1,1)
# f(x) = wx + b
y = np.random.randint(1, 6, size=1)*X + np.random.randint(-5, 5, size=1)
# Add Gaussian noise to the targets
y += np.random.randn(20, 1)*0.8
plt.scatter(X, y, color = 'red')
lr = LinearRegression()
lr.fit(X, y)
w = lr.coef_[0, 0]
b = lr.intercept_[0]
print(w, b)
# 4.984999512157303 -3.7974579753883653
plt.scatter(X, y)
x = np.linspace(1, 11, 50)
plt.plot(x, w*x + b, color='green')
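Under the hood, LinearRegression solves the ordinary least-squares problem in closed form. As a quick sanity check (a sketch, reusing the X and y defined above), np.linalg.lstsq on a design matrix with a bias column should recover the same w and b:

# Design matrix [x, 1], so that A @ [w, b].T approximates y
A = np.concatenate([X, np.ones_like(X)], axis=1)
solution = np.linalg.lstsq(A, y, rcond=None)[0]
print(solution[0, 0], solution[1, 0])  # should match lr.coef_ and lr.intercept_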
Hand-written linear regression (simple version)
# Solve the one-variable, first-degree problem for w and b with gradient descent
class LinearModel(object):
    def __init__(self):
        self.w = np.random.randn(1)[0]
        self.b = np.random.randn(1)[0]

    # Mathematical model: express the relation between X and the targets, f(x) = wx + b
    def model(self, x):
        return self.w*x + self.b

    # Least-squares cost for one sample, plus its gradients
    def loss(self, x, y):
        cost = (y - self.model(x))**2
        # The gradient is the pair of partial derivatives w.r.t. the two unknowns w and b:
        #   d/dw (y - wx - b)**2 = 2*(y - wx - b)*(-x)
        #   d/db (y - wx - b)**2 = 2*(y - wx - b)*(-1)
        gradient_w = 2*(y - self.model(x))*(-x)
        gradient_b = 2*(y - self.model(x))*(-1)
        return cost, gradient_w, gradient_b

    # Gradient descent: step w and b against the gradient
    def gradient_descent(self, gradient_w, gradient_b, learning_rate=0.1):
        self.w -= gradient_w*learning_rate
        self.b -= gradient_b*learning_rate

    # Training
    def fit(self, X, y):
        count = 0  # give up after 3000 optimization steps
        tol = 0.0001
        last_w = self.w + 0.1
        last_b = self.b + 0.1
        length = len(X)
        while True:
            if count > 3000:  # iteration budget exhausted
                break
            # Stop once the slope and intercept change by less than the required precision
            if (abs(last_w - self.w) < tol) and (abs(last_b - self.b) < tol):
                break
            cost = 0
            gradient_w = 0
            gradient_b = 0
            # Average the cost and gradients over the whole data set
            for i in range(length):
                cost_, gradient_w_, gradient_b_ = self.loss(X[i, 0], y[i, 0])
                cost += cost_/length
                gradient_w += gradient_w_/length
                gradient_b += gradient_b_/length
            # print('--------------------- step: %d, loss: %0.2f' % (count, cost))
            last_w = self.w
            last_b = self.b
            # Update the intercept and slope
            self.gradient_descent(gradient_w, gradient_b, 0.01)
            count += 1

    def result(self):
        return self.w, self.b
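Hand-derived gradients are easy to get wrong, so a finite-difference check is a useful sketch (assuming the LinearModel class above): the analytic gradient_w returned by loss should match the numerical slope of the cost.

lm_check = LinearModel()
x0, y0, eps = 3.0, 7.0, 1e-6
_, gw, gb = lm_check.loss(x0, y0)

# Numerical partial derivative of the cost w.r.t. w
lm_check.w += eps
cost_plus = (y0 - lm_check.model(x0))**2
lm_check.w -= 2*eps
cost_minus = (y0 - lm_check.model(x0))**2
lm_check.w += eps  # restore w
print(gw, (cost_plus - cost_minus)/(2*eps))  # the two numbers should agree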
Fitting with the hand-written linear regression
lm = LinearModel()
lm.fit(X, y)
w_, b_ = lm.result()
plt.scatter(X, y, c='red')
# sklearn's fit (green) vs. the hand-written fit (blue)
plt.plot(x, w*x + b, color='green')
plt.plot(x, w_*x + b_, color='blue')
plt.title('Fit from the hand-written algorithm')
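Printing both sets of parameters makes the comparison explicit; with the 0.0001 tolerance used in fit, the hand-written result should land close to sklearn's:

print('sklearn: w=%.4f, b=%.4f' % (w, b))
print('custom : w=%.4f, b=%.4f' % (w_, b_))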
Quadratic (second-degree, one-variable) fit
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# A quadratic in one variable,
#     f(x) = w1*x**2 + w2*x + b,
# has the same form as a linear function of two variables,
#     f(x1, x2) = w1*x1 + w2*x2 + b,
# so stacking x**2 and x as feature columns turns it into a linear regression problem.
X = np.linspace(0,10,num = 500).reshape(-1,1)
X = np.concatenate([X**2,X],axis = 1)
# X.shape
# (500, 2)
w = np.random.randint(1,10,size = 2)
b = np.random.randint(-5,5,size = 1)
# Matrix multiplication: y = X @ w + b
y = X.dot(w) + b
plt.plot(X[:,1],y,color = 'r')
plt.title('w1:%d, w2:%d, b:%d' % (w[0], w[1], b[0]))
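The same feature construction is also available as sklearn's PolynomialFeatures; a minimal sketch (note it emits the columns in the order [x, x**2], the reverse of the manual [X**2, X] above):

from sklearn.preprocessing import PolynomialFeatures

x_raw = np.linspace(0, 10, num=500).reshape(-1, 1)
X_poly = PolynomialFeatures(degree=2, include_bias=False).fit_transform(x_raw)
print(X_poly.shape)  # (500, 2)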
Prediction with sklearn's built-in algorithm
lr = LinearRegression()
lr.fit(X,y)
print(lr.coef_,lr.intercept_)
# [1. 1.] 3.999999999999993
plt.scatter(X[:,1],y,marker = '*')
x = np.linspace(-2,12,100)
# Plot the fitted curve using the learned coefficients
plt.plot(x, lr.coef_[0]*x**2 + lr.coef_[1]*x + lr.intercept_, color='green')
Hand-written linear regression: fitting multiple features (a multivariate equation)
# epoch: how many passes of gradient descent to run
def gradient_descent(X, y, lr, epoch, w, b):
    # Batch size: use the full data set on each pass
    batch = len(X)
    for i in range(epoch):
        # d_loss: gradient of the loss for the current sample
        d_loss = 0
        # Gradients of the coefficients
        dw = [0 for _ in range(len(w))]
        # Gradient of the intercept
        db = 0
        for j in range(batch):
            y_ = 0  # prediction: y_ = f(x) = w1*x1 + w2*x2 + b
            for n in range(len(w)):
                y_ += X[j][n]*w[n]
            y_ += b
            # (y - y_)**2  differentiates to  2*(y - y_)*(-1)
            # (y_ - y)**2  differentiates to  2*(y_ - y)*(1)
            # (the constant factor 2 is absorbed into the learning rate)
            d_loss = -(y[j] - y_)
            for n in range(len(w)):
                dw[n] += X[j][n]*d_loss/float(batch)
            db += 1*d_loss/float(batch)
        # Gradient-descent update of the coefficients and the intercept
        for n in range(len(w)):
            w[n] -= dw[n]*lr[n]
        b -= db*lr[0]
    return w, b
# Per-feature learning rates (note this rebinds the name lr used earlier for the sklearn model)
lr = [0.0001, 0.0001]
w = np.random.randn(2)
b = np.random.randn(1)[0]
w_,b_ = gradient_descent(X,y,lr,5000,w,b)
print(w_,b_)
# [1.00325157 1.22027686] 2.1550745865631895
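The learning rates are this small because the X**2 column grows to about 100, and larger steps make the updates diverge. As a sanity check (a sketch reusing X and y from above), sklearn's solver gives the values the hand-written loop is converging toward:

check = LinearRegression()
check.fit(X, y)
print(check.coef_, check.intercept_)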