```python
n_components = 150

print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()  # start timing
pca = PCA(n_components=n_components, svd_solver='randomized',
          whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))

eigenfaces = pca.components_.reshape((n_components, h, w))

print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()  # reduce the dimensionality of the data
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
```
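As a quick sanity check on the choice of `n_components = 150`, you can inspect how much of the pixel variance the retained components actually explain. This is a minimal sketch using the fitted `pca` object from the block above; the 95% threshold is only an illustrative choice:

```python
import numpy as np

# Cumulative fraction of variance explained by the retained components.
cumulative = np.cumsum(pca.explained_variance_ratio_)
print("Variance kept by %d components: %.1f%%"
      % (pca.n_components_, 100 * cumulative[-1]))

# Smallest number of components that would keep at least 95% of the variance.
print("Components for 95% of the variance:",
      int(np.searchsorted(cumulative, 0.95)) + 1)
```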
print("Fitting the classifier to the training set")t0 =time()param_grid ={'C': [1e3,5e3,1e4,5e4,1e5],'gamma': [0.0001,0.0005,0.001,0.005,0.01,0.1],}clf =GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
`kernel='rbf'`: use the (Gaussian) radial basis function as the SVM kernel (see the short sketch after the code block below).
```python
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
```
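For reference, the (Gaussian) RBF kernel whose `gamma` the grid search tunes computes `K(x, x') = exp(-gamma * ||x - x'||^2)`. The sketch below simply checks that intuition against scikit-learn's `rbf_kernel`; the two sample vectors and the `gamma` value are arbitrary illustrations, not values taken from the example:

```python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
x1 = rng.random((1, 150))   # two made-up 150-dimensional PCA feature vectors
x2 = rng.random((1, 150))
gamma = 0.001               # one of the values explored in param_grid

# Manual evaluation of K(x1, x2) = exp(-gamma * ||x1 - x2||^2) ...
manual = np.exp(-gamma * np.sum((x1 - x2) ** 2))
# ... which matches scikit-learn's implementation.
print(manual, rbf_kernel(x1, x2, gamma=gamma)[0, 0])
```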
(5) Make predictions on the test set
Use `y_pred = clf.predict(X_test_pca)` to make predictions on the test set.
print("Predicting people's names on the test set")t0 =time()#用最佳發現的參數對評估器進行預測。y_pred = clf.predict(X_test_pca)print("done in %0.3fs"% (time() - t0))print(classification_report(y_test, y_pred, target_names=target_names))print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
The complete code for this example is as follows:

```python
from time import time
import logging
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC


print(__doc__)

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')


# #############################################################################
# Download the data, if not already on disk and load it as numpy arrays

lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape

# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]

# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]

print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)


# #############################################################################
# Split into a training set and a test set using a stratified k fold

# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)


# #############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150

print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
          whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))

eigenfaces = pca.components_.reshape((n_components, h, w))

print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))


# #############################################################################
# Train a SVM classification model

print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)


# #############################################################################
# Quantitative evaluation of the model quality on the test set

print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))

print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))


# #############################################################################
# Qualitative evaluation of the predictions using matplotlib

def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits"""
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        plt.subplot(n_row, n_col, i + 1)
        plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[i], size=12)
        plt.xticks(())
        plt.yticks(())


# plot the result of the prediction on a portion of the test set

def title(y_pred, y_test, target_names, i):
    pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (pred_name, true_name)


prediction_titles = [title(y_pred, y_test, target_names, i)
                     for i in range(y_pred.shape[0])]

plot_gallery(X_test, prediction_titles, h, w)

# plot the gallery of the most significative eigenfaces

eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)

plt.show()
```
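As a possible variation on the example above, the PCA and SVM steps can be chained in a single `Pipeline`, so the grid search re-fits the eigenface projection inside every cross-validation split instead of reusing one projection fitted on the whole training set. This is only a sketch of that alternative; the reduced parameter grid is an arbitrary illustration:

```python
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA
from sklearn.svm import SVC

# Chain the eigenface projection and the RBF SVM into one estimator.
model = Pipeline([
    ('pca', PCA(n_components=150, svd_solver='randomized', whiten=True)),
    ('svc', SVC(kernel='rbf', class_weight='balanced')),
])

# Parameters of pipeline steps are addressed as <step>__<parameter>.
param_grid = {
    'svc__C': [1e3, 1e4, 1e5],
    'svc__gamma': [0.0001, 0.001, 0.01],
}

search = GridSearchCV(model, param_grid)
search.fit(X_train, y_train)          # raw pixels in; PCA runs inside the pipeline
print(search.best_params_)
print(search.score(X_test, y_test))   # mean accuracy on the test set
```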