""" python归一化函数MinMaxScaler的理解 class sklearn.preprocessing.MinMaxScaler(feature_range=0, 1, *, copy=True) """ from sklearn.preprocessing import MinMaxScaler import numpy as np x = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]]) scaler = MinMaxScaler() """ MinMaxScaler实现逻辑: x_std = (x-x.min(axis = 0))/(x.max(axis = 0)-x.min(axis=0)) x_scaled = x_std*(max-min)+min 这里的max是MinMaxScaler()函数的参数feature_range区间的最大值, feature_range默认最小值为0,最大值为1, 也就是x_scaled = x_std*(max-min)+min中的max为1,min为0. axis = 0 为列,也就是x.min(axis = 0)为每列的最小值 """ result = scaler.fit_transform(x) """ 以第二行第三列为例 x = 0 x.min(axis=0)= -1 x.max(axis=0)= 2 所以 x_std = (x-x.min(axis = 0))/(x.max(axis = 0)-x.min(axis=0)) = (0-(-1))/(2-(-1))=1/3 x_scaled = x_std*(max-min)+min = (1/3)*(1-0)+0=1/3 """ print(result)
Result:
[[0.5 0. 1. ]
[1. 0.5 0.33333333]
[0. 1. 0. ]]
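As a sanity check, here is a minimal sketch (not part of the original script) that reproduces fit_transform by applying the formula above directly with NumPy, assuming the default feature_range=(0, 1); the variable names x_std, x_scaled, feature_min, and feature_max are chosen here for illustration.

# Manually reproduce MinMaxScaler's output with the formula from the comments.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

x = np.array([[1., -1., 2.],
              [2., 0., 0.],
              [0., 1., -1.]])

# Column-wise standardization to [0, 1].
x_std = (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))

# Rescale to the target feature_range (default is (0, 1), so this is a no-op).
feature_min, feature_max = 0, 1
x_scaled = x_std * (feature_max - feature_min) + feature_min

# Compare against sklearn's result; prints True.
print(np.allclose(x_scaled, MinMaxScaler().fit_transform(x)))

Passing a different feature_range, e.g. MinMaxScaler(feature_range=(-1, 1)), only changes the final rescaling step: the same x_std is mapped into [-1, 1] instead of [0, 1].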