Exploratory analysis of a tabular dataset usually follows these steps:
1. Data overview
- Read the dataset and check its size and the number of raw feature columns
- Check feature data types and basic statistics
2. Missing values and unique values
- Check how much data is missing
- Check for features with a single unique value
3. Digging into the data
- Categorical features
- Numerical features (discrete vs. continuous)
4. Correlations
- Between features
- Between features and the target variable
5. Generate a data report with pandas_profiling
Below, we walk through these steps on a retail credit-risk binary classification competition dataset.
1. Overview
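The code in this walkthrough assumes the usual imports below and that train and test have already been loaded; the competition file paths are not given here, so the read step is only sketched in a comment:
import datetime

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# train = pd.read_csv(...)  # load the competition's training file here
# test = pd.read_csv(...)   # load the competition's test file here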
print(train.shape)
print(test.shape)
print(train.columns)
train.info()
train.describe()
(800000, 47)
(200000, 46)
Index(['id', 'loanAmnt', 'term', 'interestRate', 'installment', 'grade', 'subGrade', 'employmentTitle', 'employmentLength', 'homeOwnership', 'annualIncome', 'verificationStatus', 'issueDate', 'isDefault', 'purpose', 'postCode', 'regionCode', 'dti', 'delinquency_2years', 'ficoRangeLow', 'ficoRangeHigh', 'openAcc', 'pubRec', 'pubRecBankruptcies', 'revolBal', 'revolUtil', 'totalAcc', 'initialListStatus', 'applicationType', 'earliesCreditLine', 'title', 'policyCode', 'n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9', 'n10', 'n11', 'n12', 'n13', 'n14'], dtype='object')
For a binary classification problem, we should also check the ratio of positive to negative samples to see whether the classes are imbalanced.
train['isDefault'].value_counts()
0    640390
1    159610
Name: isDefault, dtype: int64
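The same check expressed as proportions (a one-line sketch); roughly 20% of the loans default, so the imbalance is moderate:
train['isDefault'].value_counts(normalize=True)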
2. Missing values
Check how many columns contain missing values and how high the missing rates are. Columns with a very large share of missing values can be dropped, while columns with only a few missing values can be imputed (see the sketch after the code below).
train.isnull().any().sum()
22
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar()
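As a follow-up, a minimal sketch of the drop/fill strategy described above; the 50% threshold and the median/mode fills are assumptions for illustration, and the work is done on a copy so the raw data stays untouched:
train_clean = train.copy()

# drop columns whose missing rate exceeds an assumed 50% threshold
drop_cols = missing[missing > 0.5].index.tolist()
train_clean = train_clean.drop(columns=drop_cols)

# fill the remaining gaps: median for numeric columns, mode for object columns
for col in train_clean.columns[train_clean.isnull().any()]:
    if train_clean[col].dtype == 'object':
        train_clean[col] = train_clean[col].fillna(train_clean[col].mode()[0])
    else:
        train_clean[col] = train_clean[col].fillna(train_clean[col].median())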
Also check whether any feature takes only a single unique value; if so, it carries no information and can be dropped during feature engineering.
one_value_col = [col for col in train.columns if train[col].nunique() <= 1]
one_value_col
['policyCode']
3. Digging into the data
First, check how many columns are numerical and how many are categorical.
numerical_fea = list(train.select_dtypes(exclude=['object']).columns)
category_fea = list(filter(lambda x: x not in numerical_fea, list(train.columns)))
print(len(numerical_fea))
print(len(category_fea))
print(category_fea)
print(numerical_fea)
42
5
['grade', 'subGrade', 'employmentLength', 'issueDate', 'earliesCreditLine']
['id', 'loanAmnt', 'term', 'interestRate', 'installment', 'employmentTitle', 'homeOwnership', 'annualIncome', 'verificationStatus', 'isDefault', 'purpose', 'postCode', 'regionCode', 'dti', 'delinquency_2years', 'ficoRangeLow', 'ficoRangeHigh', 'openAcc', 'pubRec', 'pubRecBankruptcies', 'revolBal', 'revolUtil', 'totalAcc', 'initialListStatus', 'applicationType', 'title', 'policyCode', 'n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9', 'n10', 'n11', 'n12', 'n13', 'n14']
Of the 46 feature columns, only 5 are of object type; these will need to be encoded during feature engineering (see the encoding sketch after the plots below).
for col in category_fea:
    temp = train[col].value_counts()
    plt.figure(figsize=(10, 6))
    sns.barplot(x=temp.index, y=temp.values)
    plt.xlabel(str(col))
    plt.show()
    plt.close('all')
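As noted above, the object columns need to be encoded before modeling. A hedged sketch for two of them, employmentLength and grade (the mapping below is one possible choice, applied to a copy so the raw string columns remain available for the plots and pivot table that follow):
train_fe = train.copy()

# employmentLength: map '< 1 year' to 0, '10+ years' to 10, and 'N years' to N
def employment_length_to_num(s):
    if pd.isnull(s):
        return s
    if s == '< 1 year':
        return 0
    if s == '10+ years':
        return 10
    return int(s.split()[0])

train_fe['employmentLength'] = train_fe['employmentLength'].apply(employment_length_to_num)

# grade is an ordered letter code, so an ordinal mapping preserves its order
grade_map = {g: i for i, g in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G'], start=1)}
train_fe['grade'] = train_fe['grade'].map(grade_map)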
Next, the numerical variables should be split into discrete and continuous ones. Here a variable with at most 10 distinct values is treated as discrete.
def get_numerical_serial_fea(data, feas):
    numerical_serial_fea = []
    numerical_noserial_fea = []
    for fea in feas:
        temp = data[fea].nunique()
        if temp <= 10:
            numerical_noserial_fea.append(fea)
            continue
        numerical_serial_fea.append(fea)
    return numerical_serial_fea, numerical_noserial_fea

numerical_serial_fea, numerical_noserial_fea = get_numerical_serial_fea(train, numerical_fea)
For discrete variables, focus on whether the distribution is heavily skewed, i.e. most samples take the same value; a sketch that quantifies this follows the plots below.
# plot the distribution of each discrete numerical variable
for col in numerical_noserial_fea:
    temp = train[col].value_counts()
    plt.figure()
    sns.barplot(x=temp.index, y=temp.values)
    plt.xlabel(str(col))
    plt.show()
    plt.close('all')
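To quantify the skew rather than only eyeball it, a short sketch that reports the share of the single most frequent value per discrete feature (the 95% cut-off is an assumed rule of thumb):
for col in numerical_noserial_fea:
    top_share = train[col].value_counts(normalize=True).iloc[0]
    if top_share > 0.95:
        print(f'{col}: {top_share:.2%} of samples take a single value')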
For continuous variables, check whether the data roughly follow a normal (or log-normal) distribution; strongly skewed data can hurt the predictions of many models.
# plot the distribution of each continuous numerical variable
# (sns.distplot is deprecated in newer seaborn; sns.histplot is the replacement)
f = pd.melt(train, value_vars=numerical_serial_fea)
g = sns.FacetGrid(f, col="variable", col_wrap=4, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
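For strongly right-skewed continuous features, a log transform is a common remedy; a minimal sketch that flags skewed columns and previews np.log1p on loanAmnt (the skewness threshold of 1 is an assumption):
skewed = train[numerical_serial_fea].skew().sort_values(ascending=False)
print(skewed[skewed > 1])  # columns with strong positive skew

sns.histplot(np.log1p(train['loanAmnt']))  # preview of a log1p-transformed feature
plt.show()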
4. Correlations
First, examine the correlations among the features and between each feature and the target variable.
corr = train.corr()  # on newer pandas, use train.corr(numeric_only=True) to skip the object columns
sns.heatmap(corr, annot=False)
plt.show()
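To focus on the relationship with the target specifically, the same correlation matrix can be sliced and sorted (a small sketch):
# correlation of each numeric feature with the target, strongest absolute values first
corr['isDefault'].drop('isDefault').sort_values(key=abs, ascending=False).head(10)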
We can also compare each feature's distribution across the two target classes.
loan_def = train.loc[train['isDefault'] == 1]
loan_nodef = train.loc[train['isDefault'] == 0]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 8))
loan_def.groupby('grade').grade.count().plot(kind='barh', ax=ax1, title='Count of grade - default')
loan_nodef.groupby('grade').grade.count().plot(kind='barh', ax=ax2, title='Count of grade - no default')
loan_def.groupby('employmentLength').employmentLength.count().plot(kind='barh', ax=ax3, title='Count of employmentLength - default')
loan_nodef.groupby('employmentLength').employmentLength.count().plot(kind='barh', ax=ax4, title='Count of employmentLength - no default')
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
train.loc[train['isDefault'] == 1]['loanAmnt'].plot(kind='hist', bins=100, title='Loan Amt - default', color='r', ax=ax1)
train.loc[train['isDefault'] == 0]['loanAmnt'].plot(kind='hist', bins=100, title='Loan Amt - no default', color='b', ax=ax2)
5. Generate a data report with pandas_profiling
import pandas_profiling
pfr = pandas_profiling.ProfileReport(train)
pfr.to_file("./example.html")
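On newer environments the library has been renamed to ydata-profiling; the equivalent call looks like this (a sketch, assuming the ydata-profiling package is installed):
from ydata_profiling import ProfileReport

ProfileReport(train, title='EDA report').to_file('./example.html')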
6. Miscellaneous
Inspecting date-formatted columns
# convert the format; the issueDateDT feature is the number of days between issueDate and the earliest date in the dataset (2007-06-01)
day = pd.to_datetime(train['issueDate'], format='%Y-%m-%d')
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
day = day.apply(lambda x: x - startdate).dt.days
sns.histplot(day)
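To turn these day offsets into the issueDateDT feature named in the comment, and to keep train and test consistent, a small sketch:
# store the offset in days as a new column on both splits
for df in [train, test]:
    dt = pd.to_datetime(df['issueDate'], format='%Y-%m-%d')
    df['issueDateDT'] = (dt - startdate).dt.days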
Pivot table
pivot = pd.pivot_table(train, index=['grade'], columns=['employmentLength'], values=['loanAmnt'], aggfunc=np.mean)
pivot
Mean loanAmnt by grade (rows) and employmentLength (columns):

| grade | 1 year | 10+ years | 2 years | 3 years | 4 years | 5 years | 6 years | 7 years | 8 years | 9 years | < 1 year |
|---|---|---|---|---|---|---|---|---|---|---|---|
| A | 13335.898602 | 14639.775562 | 13388.455829 | 13486.865618 | 13553.706061 | 13496.995001 | 13775.733802 | 13935.131159 | 14192.510565 | 14072.881276 | 13560.589568 |
| B | 12486.311108 | 14191.576561 | 12655.767868 | 12852.254751 | 12997.182897 | 13048.167405 | 13135.203245 | 13318.696946 | 13460.523945 | 13513.865997 | 12994.001504 |
| C | 13093.052072 | 15527.287529 | 13383.550540 | 13587.211321 | 13731.955067 | 13860.520936 | 14098.561372 | 14395.124677 | 14413.680358 | 14699.868190 | 13483.717789 |
| D | 14204.809266 | 16918.674549 | 14418.175926 | 14476.062066 | 14837.774220 | 14834.854212 | 15224.665884 | 15742.203467 | 15625.839781 | 15967.309875 | 14230.622259 |
| E | 16304.007848 | 19339.688764 | 16762.507469 | 16840.061266 | 17080.681138 | 17478.838499 | 17938.082852 | 17567.287968 | 17981.827812 | 18108.666970 | 16209.714997 |
| F | 17570.015699 | 20787.572663 | 17880.975030 | 18417.187500 | 18881.518876 | 19196.168342 | 19050.279018 | 19315.302691 | 19507.407407 | 19630.162338 | 18335.909091 |
| G | 18475.923295 | 22099.393271 | 20240.042827 | … | … | … | … | … | … | … | … |