# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================

Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1

"""

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (kept for older Matplotlib)

from sklearn import datasets, linear_model

# Load the diabetes dataset and restrict it to the first two features.
X, y = datasets.load_diabetes(return_X_y=True)
indices = (0, 1)

# Hold out the last 20 samples as a test split.
X_train = X[:-20, indices]
X_test = X[-20:, indices]
y_train = y[:-20]
y_test = y[-20:]

# Ordinary least squares on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)


# #############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf, y_train=y_train):
    """Scatter the training points and draw the fitted OLS plane.

    Parameters
    ----------
    fig_num : int
        Matplotlib figure number to draw into.
    elev, azim : float
        Elevation and azimuth (degrees) of the 3D viewpoint.
    X_train : ndarray of shape (n_samples, 2)
        The two training features (x and y axes of the plot).
    clf : fitted estimator with a ``predict`` method
        Regressor whose prediction surface is plotted.
    y_train : ndarray of shape (n_samples,), default: module-level split
        Targets for the scatter plot (z axis). Added as a keyword
        parameter so the function no longer depends on a global;
        the default preserves the original behavior.
    """
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    # Axes3D(fig, ...) no longer auto-attaches the axes to the figure
    # (deprecated in Matplotlib 3.4, removed in 3.6) — create the 3D axes
    # through the figure and set the viewpoint explicitly instead.
    ax = fig.add_subplot(projection="3d")
    ax.view_init(elev=elev, azim=azim)

    ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c="k", marker="+")
    # Evaluate the fitted plane on the four corners of a grid that covers
    # the (standardized) feature range, then render it as a surface.
    ax.plot_surface(
        np.array([[-0.1, -0.1], [0.15, 0.15]]),
        np.array([[-0.1, 0.15], [-0.1, 0.15]]),
        clf.predict(
            np.array([[-0.1, -0.1, 0.15, 0.15], [-0.1, 0.15, -0.1, 0.15]]).T
        ).reshape((2, 2)),
        alpha=0.5,
    )
    ax.set_xlabel("X_1")
    ax.set_ylabel("X_2")
    ax.set_zlabel("Y")
    # ax.w_{x,y,z}axis was deprecated and removed (Matplotlib 3.8);
    # the public axis attributes are the supported spelling.
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.zaxis.set_ticklabels([])


# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)

elev = -0.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)

elev = -0.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)

plt.show()