
Logistic Regression: Breast Cancer Tumor Prediction

  1. Inspect the basic information of the dataset and remove anomalous symbols.

import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer

# Load the breast cancer prediction dataset
data = load_breast_cancer()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target, name='target')

# Inspect basic information about the dataset
print("Basic dataset information:")
print(X.info())

# Remove anomalous symbols ('?' placeholders) and drop the affected rows
# (the sklearn version of this dataset has no missing values, so nothing is dropped)
X.replace(to_replace='?', value=np.nan, inplace=True)
X.dropna(axis=0, inplace=True)
y = y.loc[X.index]  # keep the labels aligned with the remaining rows

# Print basic information about the cleaned dataset
print("\nBasic information after cleaning:")
print(X.info())

Output:

  2. Implement logistic regression with the LogisticRegression and LogisticRegressionCV classes.

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, classification_report)
import warnings
from sklearn.exceptions import ConvergenceWarning

# Ignore convergence warnings
warnings.filterwarnings("ignore", category=ConvergenceWarning)

# Load the breast cancer prediction dataset
data = load_breast_cancer()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target, name='target')

# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initialize and train a LogisticRegression model
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)

# Predict on the test set
y_pred_lr = lr_model.predict(X_test)

# Number of test samples and number of correct predictions
test_data_count = len(y_test)
correct_predictions_lr = accuracy_score(y_test, y_pred_lr, normalize=False)
f1_lr = f1_score(y_test, y_pred_lr)

# Counts of predicted benign (1) and malignant (0) samples
positive_count = pd.Series(y_pred_lr).value_counts()[1]
negative_count = pd.Series(y_pred_lr).value_counts()[0]

# Generate the classification report as a dict
report = classification_report(y_test, y_pred_lr, target_names=['class 0', 'class 1'], output_dict=True)

# Extract the weighted and macro (arithmetic) averages
weighted_precision_avg = report['weighted avg']['precision']
weighted_recall_avg = report['weighted avg']['recall']
weighted_f1_avg = report['weighted avg']['f1-score']

macro_precision_avg = report['macro avg']['precision']
macro_recall_avg = report['macro avg']['recall']
macro_f1_avg = report['macro avg']['f1-score']

print("LogisticRegression model:")
print(f"Number of test samples: {test_data_count}")
print(f"Number of correct predictions: {correct_predictions_lr}")
print(f"Weights: {lr_model.coef_}")
print(f"Predictions: {y_pred_lr}")
print(f"Accuracy: {accuracy_score(y_test, y_pred_lr)}")
print("                 precision            recall               F1_score             support")
print(f"   benign        {precision_score(y_test, y_pred_lr, pos_label=1)},  {recall_score(y_test, y_pred_lr, pos_label=1)},  {f1_score(y_test, y_pred_lr, pos_label=1)},  {positive_count}")
print(f"   malignant     {precision_score(y_test, y_pred_lr, pos_label=0)},  {recall_score(y_test, y_pred_lr, pos_label=0)},  {f1_score(y_test, y_pred_lr, pos_label=0)},  {negative_count}")
print(f"   accuracy      {accuracy_score(y_test, y_pred_lr)},  {len(X_test)}")
print(f"   macro avg     {macro_precision_avg},  {macro_recall_avg},  {macro_f1_avg},  {len(X_test)}")
print(f"   weighted avg  {weighted_precision_avg},  {weighted_recall_avg},  {weighted_f1_avg},  {len(X_test)}")


# Initialize and train a LogisticRegressionCV model
lrcv_model = LogisticRegressionCV()
lrcv_model.fit(X_train, y_train)

# Predict on the test set
y_pred_lrcv = lrcv_model.predict(X_test)

# Number of correct predictions for the CV model
correct_predictions_lrcv = accuracy_score(y_test, y_pred_lrcv, normalize=False)
f1_lrcv = f1_score(y_test, y_pred_lrcv)

# Counts of predicted benign (1) and malignant (0) samples
positive_count = pd.Series(y_pred_lrcv).value_counts()[1]
negative_count = pd.Series(y_pred_lrcv).value_counts()[0]

# Generate the classification report as a dict
report = classification_report(y_test, y_pred_lrcv, target_names=['class 0', 'class 1'], output_dict=True)

# Extract the weighted and macro (arithmetic) averages
weighted_precision_avg = report['weighted avg']['precision']
weighted_recall_avg = report['weighted avg']['recall']
weighted_f1_avg = report['weighted avg']['f1-score']

macro_precision_avg = report['macro avg']['precision']
macro_recall_avg = report['macro avg']['recall']
macro_f1_avg = report['macro avg']['f1-score']

print('\n')
print("LogisticRegressionCV model:")
print(f"Number of test samples: {test_data_count}")
print(f"Number of correct predictions: {correct_predictions_lrcv}")
print(f"Weights: {lrcv_model.coef_}")
print(f"Predictions: {y_pred_lrcv}")
print(f"Accuracy: {accuracy_score(y_test, y_pred_lrcv)}")
print("                 precision            recall               F1_score             support")
print(f"   benign        {precision_score(y_test, y_pred_lrcv, pos_label=1)},  {recall_score(y_test, y_pred_lrcv, pos_label=1)},  {f1_score(y_test, y_pred_lrcv, pos_label=1)},  {positive_count}")
print(f"   malignant     {precision_score(y_test, y_pred_lrcv, pos_label=0)},  {recall_score(y_test, y_pred_lrcv, pos_label=0)},  {f1_score(y_test, y_pred_lrcv, pos_label=0)},  {negative_count}")
print(f"   accuracy      {accuracy_score(y_test, y_pred_lrcv)},  {len(X_test)}")
print(f"   macro avg     {macro_precision_avg},  {macro_recall_avg},  {macro_f1_avg},  {len(X_test)}")
print(f"   weighted avg  {weighted_precision_avg},  {weighted_recall_avg},  {weighted_f1_avg},  {len(X_test)}")

3. Print the number of test samples and the number of correct predictions, along with the weights, accuracy, predictions, and per-class precision and recall:

Output:

4. Experiment with LogisticRegression under different values of the parameter C (the inverse of the regularization strength), using C=0.1 and C=0.01; a minimal sketch of such an experiment is given after the results below.

Printed results:

C=0.1

C=0.01
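
The original screenshots of the two runs are not reproduced here. A minimal sketch of how this comparison could be run, reusing the train/test split from step 2, is shown below; the loop and the printed metrics are our illustration, not the article's exact code.

# Illustrative sketch: C is the inverse of the regularization strength, so a
# smaller C means stronger regularization.
for C in (0.1, 0.01):
    model = LogisticRegression(C=C, max_iter=10000)  # larger max_iter avoids convergence warnings
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(f"C={C}: accuracy={accuracy_score(y_test, y_pred):.4f}, F1={f1_score(y_test, y_pred):.4f}")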

5. Plot the ROC curve:

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

# Load the breast cancer prediction dataset
data = load_breast_cancer()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target, name='target')

# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initialize and train the LogisticRegression model
lr_model = LogisticRegression(C=0.1)
lr_model.fit(X_train, y_train)

# Compute decision scores on the test set
y_scores = lr_model.decision_function(X_test)

# Compute the points of the ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores)

# Compute the AUC (Area Under the Curve)
roc_auc = auc(fpr, tpr)

# Plot the ROC curve
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = {:.2f})'.format(roc_auc))
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.show()

Result:

6. Perform ten-fold cross-validation.

from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
import warnings
from sklearn.exceptions import ConvergenceWarning

# Ignore convergence warnings
warnings.filterwarnings("ignore", category=ConvergenceWarning)

# Load the breast cancer prediction dataset
data = load_breast_cancer()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target, name='target')

# Split the dataset into training and test sets (the split itself is not used by
# cross_val_score, which refits the estimator on each fold of the full data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initialize the LogisticRegression model
lr_model = LogisticRegression(C=0.1)
lr_model.fit(X_train, y_train)

# Run ten-fold cross-validation
cv_scores = cross_val_score(lr_model, X, y, cv=10, scoring='accuracy')

# Print the accuracy of each fold
print("Accuracy of each fold:")
for i, score in enumerate(cv_scores, start=1):
    print(f"Fold {i}: {score}")

# Print the mean accuracy
print(f"\nMean accuracy: {np.mean(cv_scores)}")

Result:

From the experiments above we can see that, on a relatively small dataset like this one, the LogisticRegression model classifies slightly better than LogisticRegressionCV, and the model with regularization parameter C=0.1 gives better classification results than the one with C=0.01.

LogisticRegression is a linear model for binary classification. It maps a linear combination of the inputs to a probability between 0 and 1 through the logistic (sigmoid) function. Training fits the model parameters by maximum likelihood estimation, with the log loss as the loss function.
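
As a small illustration (not part of the original article), the sketch below checks by hand that the class-1 probabilities produced by the lr_model fitted above are the sigmoid of its linear decision scores, and evaluates the corresponding log loss; the variable names are ours.

# Illustrative sketch (assumes lr_model, X_test and y_test defined above):
# the predicted probability of class 1 is the sigmoid of the linear score w·x + b,
# and the training objective is the log loss of these probabilities.
scores = lr_model.decision_function(X_test)            # linear scores w·x + b
probs = 1.0 / (1.0 + np.exp(-scores))                  # sigmoid maps scores into (0, 1)
print(np.allclose(probs, lr_model.predict_proba(X_test)[:, 1]))  # should print True

clipped = np.clip(probs, 1e-15, 1 - 1e-15)             # avoid log(0)
log_loss = -np.mean(y_test * np.log(clipped) + (1 - y_test) * np.log(1 - clipped))
print(f"Log loss on the test set: {log_loss:.4f}")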

LogisticRegressionCV is a class that automatically tunes the logistic regression hyperparameter (the regularization strength C) within a cross-validation framework. Given a grid of Cs values (inverses of the regularization strength) and a number of cross-validation folds, it searches for the best hyperparameter.
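
For instance, such a search might look like the following sketch; the Cs grid, fold count, and max_iter value here are illustrative choices of ours, not settings from the article.

# Illustrative sketch: LogisticRegressionCV tries every C in the Cs grid with
# cross-validation on the training data and keeps the best one.
lrcv = LogisticRegressionCV(Cs=[0.01, 0.1, 1, 10], cv=5, scoring='accuracy', max_iter=10000)
lrcv.fit(X_train, y_train)
print("Selected C:", lrcv.C_)                        # array with the chosen C
print("Test accuracy:", lrcv.score(X_test, y_test))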

The biggest difference between the two models is that LogisticRegression must be tuned by hand, while LogisticRegressionCV searches for the best hyperparameter automatically. That does not mean LogisticRegressionCV necessarily produces a better model: in this experiment the dataset is small and has relatively few features, so the cross-validated search brings little visible benefit and its selection can even overfit to the folds.

Concretely, LogisticRegression reached a classification accuracy of 0.9649, with precision 0.9672 and recall 0.9580, while LogisticRegressionCV reached 0.9561, with precision 0.9604 and recall 0.9464. The number of correctly classified test samples differs by only about one, so in practice the gap between the two models is small.

When the regularization parameter C is varied, the model reaches an accuracy of 0.96 with C=0.1 but only 0.94 with C=0.01, so C=0.1 is clearly the better regularization setting for this dataset; C=0.01 is too small, meaning the regularization is too strong, and the model underfits slightly.

