Python: Multivariate Linear Regression with Gradient Descent

I couldn't find a concise, decent implementation of multivariate linear regression via gradient descent on Chinese-language sites.

A concise derivation deserves an equally concise implementation.
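
For reference, the loss and update rule implemented in fit() below are the standard least-squares ones (with a bias column appended to X):

L(w) = \frac{1}{2N}\lVert Xw - y \rVert^2, \qquad
\nabla_w L = \frac{1}{N} X^\top (Xw - y), \qquad
w \leftarrow w - \eta \, \nabla_w L

where N is the number of samples and \eta is the learning rate.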

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler

class Linear_Regression():
    def __init__(self, lr=0.01, num_iter=1000):
        self.lr = lr                  # learning rate
        self.num_iter = num_iter      # number of gradient descent steps
        self.w = None                 # weights, with the bias as the last component
        self.loss_list = []           # training loss recorded at each step

    def fit(self, X, y):
        # Append a column of ones so the bias is learned as part of w
        X = np.hstack((X, np.ones(X.shape[0]).reshape(-1, 1)))
        N, m = X.shape
        self.w = np.zeros(m)
        for i in range(self.num_iter):
            # Gradient of the least-squares loss: X^T (Xw - y) / N
            grad = X.T @ (X @ self.w - y) / N
            self.w -= self.lr * grad
            # Loss ||Xw - y||^2 / (2N); note the parentheses: 1/(2*N), not 1/2*N
            loss = (1 / (2 * N)) * (X @ self.w - y).T @ (X @ self.w - y)
            self.loss_list.append(loss)

    def predict(self, X):
        # Append the same bias column before applying the learned weights
        X = np.hstack((X, np.ones(X.shape[0]).reshape(-1, 1)))
        y_pred = X @ self.w
        return y_pred

if __name__ == '__main__':
    # load_boston was removed in scikit-learn 1.2; running this as-is requires an
    # older version (or swap in another regression dataset)
    X, y = datasets.load_boston(return_X_y=True)
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    print(X.shape)
    print(y.shape)
    for _ in range(10):
        X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3)
        model = Linear_Regression()
        model.fit(X=X_train, y=y_train)
        y_pred = model.predict(X=X_test)
        # Plot the training loss curve to check that gradient descent converges
        plt.plot(np.arange(len(model.loss_list)), model.loss_list)
        plt.show()

        print("gradient descent MSE:", mean_squared_error(y_true=y_test, y_pred=y_pred))

        # Compare against sklearn's LinearRegression on the same split
        model_1 = LinearRegression()
        model_1.fit(X=X_train, y=y_train)
        y_pred = model_1.predict(X=X_test)
        print("sklearn LinearRegression MSE:", mean_squared_error(y_true=y_test, y_pred=y_pred))

        break  # evaluate only one random split in this demo
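
As a quick sanity check (my own sketch, not part of the original script), fitting the class on noise-free synthetic data with known coefficients should recover them, since the loss is convex and the features here are well conditioned:

import numpy as np  # already imported above

# Hypothetical example: generate y = Xw + b with known w and b, then fit
rng = np.random.default_rng(0)
X_demo = rng.normal(size=(200, 3))
y_demo = X_demo @ np.array([2.0, -1.0, 0.5]) + 3.0

reg = Linear_Regression()   # the class defined above
reg.fit(X_demo, y_demo)
print(reg.w)                # expect roughly [2.0, -1.0, 0.5, 3.0]; the last entry is the bias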
