Note: Ng's videos walk through the full derivation, but theory and practice still differ quite a bit, so the code implementation still has to be worked through.
1. Logistic Regression Theory
http://www.cnblogs.com/wjy-lulu/p/7759515.html — Ng's derivation is flawless; understanding it is enough. There is no need to re-derive it yourself, because after a few days of not using it you will have forgotten it anyway.
2. Code Implementation
2.1 Batch Gradient Ascent
Each update is computed over the whole training set, searching for the optimum using all the data at once.
Pros: it tends to step over local extrema and find the true global optimum.
Cons: with a lot of data it takes too long, and newly arriving samples force a full retrain.
Derivation: see the link at the top of this post for Ng's complete derivation and proof. The resulting update rule is summarized below.
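For reference, the update that the gradAscent routine below implements can be written in matrix form, with data matrix $X$, label column vector $y$, sigmoid $\sigma$, and learning rate $\alpha$ (this follows the standard derivation in Ng's notes):

$$ w \leftarrow w + \alpha \, X^{\top}\bigl(y - \sigma(Xw)\bigr) $$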
def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # split on whitespace
        # store data as [[1.0, a, b], ...]
        # and labels as [[1], [0], [0], [1], ...]
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append([int(lineArr[2])])
    return dataMat, labelMat
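For reference, each line of testSet.txt is expected to carry two feature values and a 0/1 label separated by whitespace, matching the parsing above. Illustrative placeholder lines (made up, not copied from the real file):

0.5	2.3	1
-1.2	7.8	0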
def sigmoid(intX):
    return 1.0 / (1.0 + np.exp(-intX))
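Note: for strongly negative inputs, np.exp(-intX) can overflow and emit a RuntimeWarning (the horse-colic data later in this post can trigger it). A minimal hedge, if you hit that warning, is to clip the argument first; a sketch, not the book's code:

def sigmoid(intX):
    # clip to keep np.exp in range; the result is unchanged up to float precision
    return 1.0 / (1.0 + np.exp(-np.clip(intX, -500, 500)))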
# Batch gradient ascent
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)   # (m, n) data matrix
    labelsMat = np.mat(classLabels)  # (m, 1) label column vector
    m, n = dataMatrix.shape
    alpha = 0.001
    maxCycle = 200
    weight = np.ones((n, 1))  # for simplicity, the bias b is treated as just another w
    for k in range(maxCycle):
        h = sigmoid(dataMatrix * weight)  # (m, 1) predictions
        error = labelsMat - h
        weight = weight + alpha * dataMatrix.transpose() * error
    return weight
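A minimal usage sketch (assuming testSet.txt sits in the working directory):

dataMat, labelMat = loadDataSet()
weight = gradAscent(dataMat, labelMat)
print(weight)  # a (3, 1) matrix: [w0 (bias), w1, w2]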
2.2 Visualizing the Simple Classification
Use matplotlib to draw the decision boundary for this simple classifier.
Note: the plot here converts the matrices to lists before drawing. Some posts online say a matrix can be passed in directly, but that raised an error when I ran it.
def plotBestFit(weight):
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)  # convert to an array
    n = dataArr.shape[0]
    xcode1 = []; ycode1 = []
    xcode2 = []; ycode2 = []
    for i in range(n):
        if int(labelMat[i][0]) == 1:
            xcode1.append(dataArr[i, 1])
            ycode1.append(dataArr[i, 2])
        else:
            xcode2.append(dataArr[i, 1])
            ycode2.append(dataArr[i, 2])
    fig = plt.figure("data_x_y")
    ax = fig.add_subplot(111)
    ax.scatter(xcode1, ycode1, s=30, c='r', marker='s')
    ax.scatter(xcode2, ycode2, s=30, c='g')
    x = np.mat(np.arange(-3.0, 3.0, 0.1))
    y = (-weight[0] - weight[1] * x) / weight[2]  # decision boundary: w0 + w1*x1 + w2*x2 = 0
    ax.plot(x.tolist()[0], y.tolist()[0])
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
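Why the plotted line is y = (-w0 - w1*x)/w2: the decision boundary is where the sigmoid input is zero (output 0.5), so

$$ w_0 + w_1 x_1 + w_2 x_2 = 0 \quad\Rightarrow\quad x_2 = \frac{-w_0 - w_1 x_1}{w_2} $$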
2.3 Stochastic Gradient Ascent and Its Improvement
Stochastic gradient ascent: in contrast to the batch version, it updates W from a single sample at a time, and the improved version also randomizes the order in which samples are visited (a uniform random draw in the code below).
Pros: per-sample training is efficient, new samples can be folded in directly without retraining (online learning), and the randomness smooths out the periodic oscillations caused by regularities in the sample order; the book has an illustration of this.
Cons: it may settle on a local extremum.
# Stochastic gradient ascent - original version
def stoGradAscent0(dataMatrix, classLabels):
    m, n = dataMatrix.shape
    alpha = 0.01
    weights = np.ones(n)  # better not to initialize at 0, since fitting from 0 is very slow
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h  # index the single label (the original subtracted the whole list)
        weights = weights + alpha * error * dataMatrix[i]
    return weights
# Stochastic gradient ascent - improved version
def stoGradAscent1(dataMatrix, classLabels, numIter=150):
    # alpha decays as training progresses
    # samples are visited in random order
    m, n = dataMatrix.shape
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # sample indices not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01  # shrinks as iterations and per-sample updates accumulate
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # random position among the remaining indices
            sampleIndex = dataIndex[randIndex]  # map to the actual sample (the original indexed the data with randIndex directly)
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # remove it so no sample repeats within a pass
    return weights
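A quick sketch to train with the improved version and reuse plotBestFit; note the labels must be flattened first, since stoGradAscent1 indexes single label values:

dataMat, labelMat = loadDataSet()
labels = [l[0] for l in labelMat]  # flatten [[1], [0], ...] to [1, 0, ...]
weights = stoGradAscent1(np.array(dataMat), labels, numIter=150)
plotBestFit(weights)  # works with a 1-D weight array as well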
2.4 A Practical Application
Much like the earlier naive Bayes example: preprocess the data -->> train -->> test.
Classification function:
def classifyVector(inX, weight):
    prob = sigmoid(sum(inX * weight))
    if prob > 0.5:
        return 1.0
    return 0.0
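The 0.5 threshold is just a sign check on the linear score, since

$$ \sigma(w^{\top}x) > 0.5 \iff w^{\top}x > 0 $$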
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frtest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabel = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        # the last field is the label
        for i in range(len(currLine) - 1):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabel.append(float(currLine[-1]))
    # the improved stochastic gradient method --->>> per-sample algorithm = online learning
    trainWeight = stoGradAscent1(np.array(trainingSet), trainingLabel, 500)
    errorCount = 0.0
    numTestVec = 0.0
    for line in frtest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):  # 21 features per test record
            lineArr.append(float(currLine[i]))
        if int(classifyVector(np.array(lineArr), trainWeight)) != int(currLine[21]):
            errorCount += 1
    errorRate = (1.0 * errorCount) / (1.0 * numTestVec)
    print('the error Rate is : ', errorRate, '\n')
    return errorRate
def multiTest():
    numTest = 10
    errorSum = 0.0
    for k in range(numTest):
        errorSum += colicTest()
    print('error Rate Average is : ', (errorSum / numTest))
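To run the full experiment (assuming the two horse-colic data files from the book sit next to the script):

if __name__ == '__main__':
    multiTest()  # averages the error rate over 10 independent train/test runs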
2.5 Complete Program
import numpy as np
import matplotlib.pyplot as plt

def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # split on whitespace
        # store data as [[1.0, a, b], ...]
        # and labels as [[1], [0], [0], [1], ...]
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append([int(lineArr[2])])
    return dataMat, labelMat

def sigmoid(intX):
    return 1.0 / (1.0 + np.exp(-intX))

# Batch gradient ascent
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)   # (m, n) data matrix
    labelsMat = np.mat(classLabels)  # (m, 1) label column vector
    m, n = dataMatrix.shape
    alpha = 0.001
    maxCycle = 200
    weight = np.ones((n, 1))  # for simplicity, the bias b is treated as just another w
    for k in range(maxCycle):
        h = sigmoid(dataMatrix * weight)  # (m, 1) predictions
        error = labelsMat - h
        weight = weight + alpha * dataMatrix.transpose() * error
    return weight

def plotBestFit(weight):
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)  # convert to an array
    n = dataArr.shape[0]
    xcode1 = []; ycode1 = []
    xcode2 = []; ycode2 = []
    for i in range(n):
        if int(labelMat[i][0]) == 1:
            xcode1.append(dataArr[i, 1])
            ycode1.append(dataArr[i, 2])
        else:
            xcode2.append(dataArr[i, 1])
            ycode2.append(dataArr[i, 2])
    fig = plt.figure("data_x_y")
    ax = fig.add_subplot(111)
    ax.scatter(xcode1, ycode1, s=30, c='r', marker='s')
    ax.scatter(xcode2, ycode2, s=30, c='g')
    x = np.mat(np.arange(-3.0, 3.0, 0.1))
    y = (-weight[0] - weight[1] * x) / weight[2]  # decision boundary: w0 + w1*x1 + w2*x2 = 0
    ax.plot(x.tolist()[0], y.tolist()[0])
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

# Stochastic gradient ascent - original version
def stoGradAscent0(dataMatrix, classLabels):
    m, n = dataMatrix.shape
    alpha = 0.01
    weights = np.ones(n)  # better not to initialize at 0, since fitting from 0 is very slow
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h  # index the single label (the original subtracted the whole list)
        weights = weights + alpha * error * dataMatrix[i]
    return weights

# Stochastic gradient ascent - improved version
def stoGradAscent1(dataMatrix, classLabels, numIter=150):
    # alpha decays as training progresses
    # samples are visited in random order
    m, n = dataMatrix.shape
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # sample indices not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01  # shrinks as iterations and per-sample updates accumulate
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # random position among the remaining indices
            sampleIndex = dataIndex[randIndex]  # map to the actual sample (the original indexed the data with randIndex directly)
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # remove it so no sample repeats within a pass
    return weights

# Classification function
def classifyVector(inX, weight):
    prob = sigmoid(sum(inX * weight))
    if prob > 0.5:
        return 1.0
    return 0.0

def colicTest():
    frTrain = open('horseColicTraining.txt')
    frtest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabel = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        # the last field is the label
        for i in range(len(currLine) - 1):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabel.append(float(currLine[-1]))
    # the improved stochastic gradient method --->>> per-sample algorithm = online learning
    trainWeight = stoGradAscent1(np.array(trainingSet), trainingLabel, 500)
    errorCount = 0.0
    numTestVec = 0.0
    for line in frtest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):  # 21 features per test record
            lineArr.append(float(currLine[i]))
        if int(classifyVector(np.array(lineArr), trainWeight)) != int(currLine[21]):
            errorCount += 1
    errorRate = (1.0 * errorCount) / (1.0 * numTestVec)
    print('the error Rate is : ', errorRate, '\n')
    return errorRate

def multiTest():
    numTest = 10
    errorSum = 0.0
    for k in range(numTest):
        errorSum += colicTest()
    print('error Rate Average is : ', (errorSum / numTest))