Solving Kaggle's Titanic problem with sklearn

1. The code is as follows:

# Titanic Solution

# 1,prepare data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import Image
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, train_test_split
# model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# evaluation
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve

plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
plt.rcParams['figure.figsize'] = (10, 6)  # default output figure size

data = pd.read_csv('pandas_learn/data/clear_data.csv')
train = pd.read_csv('pandas_learn/data/train.csv')

X = data  # cleaned feature matrix
y = train['Survived']  # target label: survived (1) or not (0)


# stratify=y keeps the survived/not-survived ratio consistent across the split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)
# print(X_train.shape, X_test.shape)

# 2,Model Design
# 2.1, create a LogisticRegression model and train it on the data

# def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
#     fit_intercept=True, intercept_scaling=1, class_weight=None,
#     random_state=None, solver='lbfgs', max_iter=100,
#     multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
#     l1_ratio=None):

# penalty: regularization type, one of {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
# C: inverse of regularization strength; must be a positive float. As with support vector machines, smaller values mean stronger regularization.
# class_weight: class weights, either a dict or the string 'balanced', default None. For a binary model the weights can be set explicitly, e.g. class_weight={0: 0.9, 1: 0.1}; with class_weight='balanced' the library computes weights from the training class frequencies: the more samples a class has, the lower its weight, and vice versa.
# solver: optimization algorithm, one of newton-cg, lbfgs, liblinear, sag, saga; default='lbfgs' (see the signature above).
# liblinear suits small datasets, while sag and saga are faster on large ones. With L2 regularization any of {'newton-cg', 'lbfgs', 'liblinear', 'sag'} can be used, but an L1 penalty is only supported by 'liblinear' and 'saga'.
# max_iter: maximum number of iterations for the solver to converge, i.e. to minimize the loss function; default 100.
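
# A minimal sketch combining the parameters above (the values are illustrative,
# not tuned); saga is chosen because it is one of the solvers that supports L1:
# lr_l1 = LogisticRegression(penalty='l1', C=0.5, class_weight='balanced',
#                            solver='saga', max_iter=1000).fit(X_train, y_train)
# print("test set score: {:.3f}".format(lr_l1.score(X_test, y_test)))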

# lr = LogisticRegression().fit(X_train, y_train)
# print(lr.predict(X_test))
# print("training set score: {:.3f}".format(lr.score(X_train, y_train)))
# print("test set score: {:.3f}".format(lr.score(X_test, y_test)))

# 2.2, random forest model
# rf = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
# print(rf.predict(X_test))
# print("training set score: {:.3f}".format(rf.score(X_train, y_train)))
# print("test set score: {:.3f}".format(rf.score(X_test, y_test)))
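
# A hedged variant: capping tree depth usually narrows the train/test gap;
# max_depth=5 is an illustrative choice here, not a tuned value:
# rf_shallow = RandomForestClassifier(n_estimators=100, max_depth=5,
#                                     random_state=0).fit(X_train, y_train)
# print("test set score: {:.3f}".format(rf_shallow.score(X_test, y_test)))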

# 3 Model evaluation
# 3.1 Cross-validation
# lr = LogisticRegression()
# print(cross_val_score(lr,X_train,y_train,cv = 10))
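
# A small sketch: the ten fold scores read more easily as mean +/- std
# (cross_val_score returns a numpy array, so .mean() and .std() apply directly):
# scores = cross_val_score(lr, X_train, y_train, cv=10)
# print("mean CV accuracy: {:.3f} (+/- {:.3f})".format(scores.mean(), scores.std()))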

# 3.2 Confusion matrix
lr = LogisticRegression().fit(X_train, y_train)  # this fitted model is reused for the ROC curve below
# y_pred = lr.predict(X_train)
# print(y_pred.sum())
# print(confusion_matrix(y_train,y_pred,labels=[0,1]))
# print(classification_report(y_train,y_pred))
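
# Note: the block above scores the training set; a sketch of the same checks on
# the held-out test set, which is usually the more telling number:
# y_test_pred = lr.predict(X_test)
# print(confusion_matrix(y_test, y_test_pred, labels=[0, 1]))
# print(classification_report(y_test, y_test_pred))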

# 3.3 ROC curve
fpr, tpr, thresholds = roc_curve(y_test, lr.decision_function(X_test))
plt.plot(fpr, tpr, label="ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
# find the threshold closest to zero (the default decision boundary of decision_function)
close_zero = np.argmin(np.abs(thresholds))
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10, label="threshold zero", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
plt.show()
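
# A hedged follow-up: the area under the ROC curve summarizes it in one number;
# roc_auc_score accepts the same decision_function scores used for the curve:
from sklearn.metrics import roc_auc_score
print("test AUC: {:.3f}".format(roc_auc_score(y_test, lr.decision_function(X_test))))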