来源:Deephub Imba
讯享网
特征重要性分析用于了解每个特征(变量或输入)对于做出预测的有用性或价值。目标是确定对模型输出影响最大的最重要的特征,它是机器学习中经常使用的一种方法。
特征重要性分析的主要好处包括:

- 改进的模型性能
- 减少过度拟合
- 更快的训练和推理
- 增强的可解释性
讯享网
# Permutation importance: shuffle one feature at a time on the test set and
# measure how much the model's accuracy drops. A large drop means the model
# relies heavily on that feature.
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, random_state=1
)

rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(X_train, y_train)
baseline = rf.score(X_test, y_test)  # accuracy before any permutation

# FIX: replaced typographic quotes (‘accuracy’) with ASCII quotes — the
# original line was a SyntaxError.
result = permutation_importance(
    rf, X_test, y_test, n_repeats=10, random_state=1, scoring='accuracy'
)
importances = result.importances_mean  # mean drop over the 10 repeats

# Visualize permutation importances
plt.bar(range(len(importances)), importances)
plt.xlabel('Feature Index')
plt.ylabel('Permutation Importance')
plt.show()
讯享网
# Built-in impurity-based feature importance of a random forest
# (mean decrease in impurity across all trees).
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt  # FIX: plt was used but never imported

X, y = load_breast_cancer(return_X_y=True)

rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(X, y)

# FIX: the attribute is `feature_importances_` (the original
# `rf.featureimportances` raises AttributeError).
importances = rf.feature_importances_

# Plot importances
plt.bar(range(X.shape[1]), importances)
plt.xlabel('Feature Index')
plt.ylabel('Feature Importance')
plt.show()
讯享网
# Drop-column (leave-one-out) importance: retrain the model with each feature
# removed and record how much test accuracy falls relative to the baseline.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import numpy as np

# Load sample data
X, y = load_breast_cancer(return_X_y=True)

# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1
)

# Train a random forest model on ALL features to get the baseline accuracy
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(X_train, y_train)
base_acc = accuracy_score(y_test, rf.predict(X_test))

# Iterate over all columns, removing one at a time; a positive score means
# accuracy dropped when the feature was removed (i.e. the feature matters).
importances = []
for i in range(X_train.shape[1]):
    X_temp = np.delete(X_train, i, axis=1)
    rf.fit(X_temp, y_train)  # re-fit without feature i
    acc = accuracy_score(y_test, rf.predict(np.delete(X_test, i, axis=1)))
    importances.append(base_acc - acc)

# Plot importance scores
plt.bar(range(len(importances)), importances)
plt.show()
# Correlation analysis: rank features by the absolute Pearson correlation
# between each feature and the target.
import pandas as pd
from sklearn.datasets import load_breast_cancer

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

# FIX: exclude the target column itself before correlating — otherwise the
# plot is dominated by y's self-correlation of exactly 1.0.
correlations = df.drop(columns='y').corrwith(df.y).abs()
correlations.sort_values(ascending=False, inplace=True)
correlations.plot.bar()

# Recursive Feature Elimination (RFE): repeatedly fit the model and discard
# the weakest features until only `n_features_to_select` remain.
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
import pandas as pd
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

rf = RandomForestClassifier()
rfe = RFE(rf, n_features_to_select=10)
rfe.fit(X, y)

# ranking_ gives each feature's elimination rank; rank 1 marks the 10
# selected features. Example output:
# [6 4 11 12 7 11 18 21 8 16 10 3 15 14 19 17 20 13 11 11 12 9 11 5 11]
print(rfe.ranking_)
讯享网
# XGBoost's built-in gain-based feature importance.
import xgboost as xgb
import pandas as pd
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

model = xgb.XGBClassifier()
model.fit(X, y)

# FIX: the attribute is `feature_importances_` (the original
# `model.featureimportances` raises AttributeError).
importances = model.feature_importances_
importances = pd.Series(importances, index=range(X.shape[1]))
importances.plot.bar()
# PCA: plot the fraction of total variance explained by each principal
# component (NOTE: this ranks components, not the original features).
from sklearn.decomposition import PCA
import pandas as pd
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

pca = PCA()
pca.fit(X)

# FIX: fitted attributes need trailing underscores — the original used
# `pca.ncomponents` and `pca.explained_varianceratio`, which do not exist.
plt.bar(range(pca.n_components_), pca.explained_variance_ratio_)
plt.xlabel('PCA components')
plt.ylabel('Explained Variance')
# Univariate ANOVA F-test: score each feature by how well its means separate
# the target classes. f_classif returns (F-statistics, p-values).
from sklearn.feature_selection import f_classif
import pandas as pd
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

# FIX: replaced typographic quotes with ASCII quotes (SyntaxError otherwise).
fval = f_classif(X, y)
fval = pd.Series(fval[0], index=range(X.shape[1]))  # index 0 = F statistics
fval.plot.bar()
# Chi-square test: score each (non-negative) feature's dependence on the
# target. chi2 returns (chi2 statistics, p-values).
from sklearn.feature_selection import chi2
import pandas as pd
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt

X, y = load_breast_cancer(return_X_y=True)
df = pd.DataFrame(X, columns=range(30))
df['y'] = y

# FIX: replaced typographic quotes with ASCII quotes (SyntaxError otherwise).
chi_scores = chi2(X, y)
chi_scores = pd.Series(chi_scores[0], index=range(X.shape[1]))  # statistics
chi_scores.plot.bar()

当不同方法给出的特征重要性结果不一致时,建议:

- 尝试多种方法以获得更健壮的视图;
- 使用聚合结果的集成方法;
- 更多地关注相对顺序,而不是绝对值;
- 差异并不一定意味着有问题,检查差异的原因会对数据和模型有更深入的了解。
编辑 /范瑞强
审核 / 范瑞强
复核 / 范瑞强
点击下方
关注我们
版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容,请联系我们,一经查实,本站将立刻删除。
如需转载请保留出处:https://51itzy.com/kjqy/188053.html