Sklearn Cheat Sheet

Date: 2022-06-09 06:07:44

All rights reserved. Please credit the source when reposting.



Scikit-learn is an open-source Python library that implements a range of machine learning, preprocessing, cross-validation, and visualization algorithms behind a unified interface.

A Basic Example

from sklearn import neighbors, datasets, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
accuracy_score(y_test, y_pred)

Loading Data

The data can be a NumPy array, a SciPy sparse matrix, or another array-like type such as a pandas DataFrame.

import numpy as np
X = np.random.random((10,5))
y = np.array(['M','M','F','F','M','F','M','M','F','F'])  # one label per sample in X
X[X < 0.7] = 0
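
A pandas DataFrame or a SciPy sparse matrix can be passed to estimators in the same way as a NumPy array. A minimal sketch, assuming pandas and SciPy are installed (the column names are illustrative):

import pandas as pd
from scipy import sparse
df = pd.DataFrame(X, columns=['f1', 'f2', 'f3', 'f4', 'f5'])  # same data as a DataFrame
X_sparse = sparse.csr_matrix(X)                               # same data as a sparse matrix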

Preprocessing Data

Standardization

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)

Normalization

from sklearn.preprocessing import Normalizer
scaler = Normalizer().fit(X_train)
normalized_X = scaler.transform(X_train)
normalized_X_test = scaler.transform(X_test)

Binarization

from sklearn.preprocessing import Binarizer
binarizer = Binarizer(threshold=0.0).fit(X)
binary_X = binarizer.transform(X)

Encoding Categorical Features

from sklearn.preprocessing import LabelEncoder
enc = LabelEncoder()
y = enc.fit_transform(y)
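
LabelEncoder above targets the label vector y; for categorical feature columns, one-hot encoding is the usual alternative. A minimal sketch with an illustrative input column:

from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
X_cat = [['red'], ['green'], ['red']]          # illustrative categorical column
X_onehot = enc.fit_transform(X_cat).toarray()  # one binary column per category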

Imputing Missing Values

from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=0, strategy='mean')  # SimpleImputer replaces the removed sklearn.preprocessing.Imputer
imp.fit_transform(X_train)

Generating Polynomial Features

from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(5)
poly.fit_transform(X)

Splitting Data into Training and Test Sets

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

Creating Models

Supervised Learning Estimators

Linear Regression

from sklearn.linear_model import LinearRegression
lr = LinearRegression()  # the former normalize option was removed; standardize inputs with StandardScaler instead

Support Vector Machines (SVM)

from sklearn.svm import SVC
svc = SVC(kernel='linear')

Naive Bayes

from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()

KNN

from sklearn import neighbors
knn = neighbors.KNeighborsClassifier(n_neighbors=5)

Unsupervised Learning Estimators

Principal Component Analysis (PCA)

from sklearn.decomposition import PCA
pca = PCA(n_components=0.95)  # a fraction keeps just enough components to explain 95% of the variance

K-Means

from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=3, random_state=0)

Model Fitting

Supervised Learning

lr.fit(X, y)
knn.fit(X_train, y_train)
svc.fit(X_train, y_train)

Unsupervised Learning

k_means.fit(X_train)
pca_model = pca.fit_transform(X_train)
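
After fitting, the unsupervised estimators expose the structure they learned. A brief sketch of attributes that are commonly inspected (not part of the original example):

print(pca.explained_variance_ratio_)  # fraction of variance kept by each retained component
print(k_means.labels_)                # cluster assigned to each training sample
print(k_means.cluster_centers_)       # coordinates of the cluster centers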

Prediction

Supervised Learning

y_pred = svc.predict(np.random.random((2,5)))
y_pred = lr.predict(X_test)
y_pred = knn.predict_proba(X_test)

Unsupervised Learning

y_pred = k_means.predict(X_test)

Evaluating Model Performance

Classification Metrics

Accuracy

knn.score(X_test, y_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)

Classification Report

from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))

Confusion Matrix

from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))

Regression Metrics

Mean Absolute Error

from sklearn.metrics import mean_absolute_error
y_true = [3, -0.5, 2]
mean_absolute_error(y_true, y_pred)

Mean Squared Error

from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)

$R^2$ Score

from sklearn.metrics import r2_score
r2_score(y_true, y_pred)

Clustering Metrics

Adjusted Rand Index

from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y_true, y_pred)

Homogeneity

from sklearn.metrics import homogeneity_score
homogeneity_score(y_true, y_pred)

V-measure

from sklearn.metrics import v_measure_score
v_measure_score(y_true, y_pred)

Cross-Validation

from sklearn.model_selection import cross_val_score
print(cross_val_score(knn, X_train, y_train, cv=4))
print(cross_val_score(lr, X, y, cv=2))
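
cross_val_score returns one score per fold, so averaging the folds is a common next step; a different metric can be chosen with the scoring parameter. A minimal sketch using one of the built-in scoring names:

scores = cross_val_score(knn, X_train, y_train, cv=4, scoring='accuracy')
print(scores.mean(), scores.std())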

Model Tuning

Grid Search

from sklearn.model_selection import GridSearchCV
params = {"n_neighbors": np.arange(1, 3), "metric": ["euclidean", "cityblock"]}
grid = GridSearchCV(estimator=knn, param_grid=params)
grid.fit(X_train, y_train)
print(grid.best_score_)
print(grid.best_estimator_.n_neighbors)

Randomized Parameter Optimization

from sklearn.model_selection import RandomizedSearchCV
params = {"n_neighbors": range(1, 5), "weights": ["uniform", "distance"]}
rsearch = RandomizedSearchCV(estimator=knn,
                             param_distributions=params,
                             cv=4,
                             n_iter=8,
                             random_state=5)
rsearch.fit(X_train, y_train)
print(rsearch.best_score_)
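
The fitted search object keeps the best parameter combination and, with the default refit behaviour, a best estimator retrained on the whole training set. A brief sketch of how it might be used (assumes accuracy_score is imported as in the metrics section above):

print(rsearch.best_params_)         # best parameter combination found
best_knn = rsearch.best_estimator_  # estimator refitted with those parameters
y_pred = best_knn.predict(X_test)
print(accuracy_score(y_test, y_pred))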