diff --git a/exercises/02450Toolbox_Python/Scripts/ex2_2_2.py b/exercises/02450Toolbox_Python/Scripts/ex2_2_2.py
index 3d6104b2aae59b5bddd2d2afa9157d3d2e181485..d27f32b2c138d660f4a66c819dc0690387d0e0e8 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex2_2_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex2_2_2.py
@@ -14,6 +14,7 @@ from matplotlib.pyplot import (
     xlabel,
     ylabel,
     yticks,
+    tight_layout
 )
 from scipy.io import loadmat
 
@@ -99,6 +100,7 @@ for d in range(D):
     I = np.reshape(W[digit_ix, :] + X.mean(0), (16, 16))
     imshow(I, cmap=cm.gray_r)
     title("Reconstr.")
+tight_layout()
 
 # Visualize the pricipal components
 
@@ -110,6 +112,7 @@ for k in range(K):
     I = np.reshape(V[:, k], (16, 16))
     imshow(I, cmap=cm.hot)
     title("PC{0}".format(k + 1))
+tight_layout()
 
 # output to screen
 show()
diff --git a/exercises/02450Toolbox_Python/Scripts/ex3_3_1.py b/exercises/02450Toolbox_Python/Scripts/ex3_3_1.py
index f5cfc462ae073133074a692c8080b0706d084ee5..4fd6c531039d63145b701bc8d785c2241dcec549 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex3_3_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex3_3_1.py
@@ -37,6 +37,9 @@ sim_to_index = sorted(zip(sim,noti))
 
 plt.figure(figsize=(12,8))
 plt.subplot(3,1,1)
+# set font size
+plt.rcParams.update({'font.size': 16})
+
 img_hw = int(np.sqrt(len(X[0])))
 img = np.reshape(X[i], (img_hw,img_hw))
 if transpose: img = img.T
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
index e3a77ebc106a018ad928c30c1da83736957aa782..8ecfea5c64bb0a511a4bf4078c983aeaeed000ad 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
@@ -1,7 +1,7 @@
 # exercise 4.1.1
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+from matplotlib.pyplot import figure, hist, plot, show, subplots, title
 
 # Number of samples
 N = 200
 
@@ -21,12 +21,10 @@ X = np.random.normal(mu, s, N).T
 X = np.random.randn(N).T * s + mu
 
 # Plot the samples and histogram
-figure(figsize=(12, 4))
-title("Normal distribution")
-subplot(1, 2, 1)
-plot(X, ".")
-subplot(1, 3, 3)
-hist(X, bins=nbins)
+fig, ax = subplots(1, 2, figsize=(12, 4))
+fig.suptitle("Normal distribution")
+ax[0].plot(X, ".")
+ax[1].hist(X, bins=nbins)
 show()
 
 print("Ran Exercise 4.1.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_3_1.py b/exercises/02450Toolbox_Python/Scripts/ex4_3_1.py
index 646d4d52b2324f2906a586b5a3da6d05997a454e..ec5bb5d786f22d1a21d77cb8916cee65d6fbea69 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_3_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_3_1.py
@@ -13,6 +13,7 @@ from matplotlib.pyplot import (
     xticks,
     ylim,
     yticks,
+    tight_layout
 )
 from scipy.io import loadmat
 from scipy.stats import zscore
@@ -34,6 +35,7 @@ figure()
 title("Wine: Boxplot")
 boxplot(X)
 xticks(range(1, M + 1), attributeNames, rotation=45)
+tight_layout()
 
 # From this it is clear that there are some outliers in the Alcohol
 # attribute (10x10^14 is clearly not a proper value for alcohol content)
@@ -44,6 +46,7 @@ figure(figsize=(12, 6))
 title("Wine: Boxplot (standarized)")
 boxplot(zscore(X, ddof=1), attributeNames)
 xticks(range(1, M + 1), attributeNames, rotation=45)
+tight_layout()
 
 # This plot reveals that there are clearly some outliers in the Volatile
 # acidity, Density, and Alcohol attributes, i.e. attribute number 2, 8,
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
index 0790af6dbc80fc6c366a257ea8d2c4c0eddc6e9c..cb2317551793c245075d629ed6d13873e3495508 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
@@ -3,7 +3,7 @@ import sklearn.linear_model as lm
 
 # requires wine data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.pylab import figure, hist, plot, show, subplot, xlabel, ylabel
+from matplotlib.pylab import figure, hist, plot, show, subplots, xlabel, ylabel, tight_layout
 
 # Split dataset into features and target vector
 alcohol_idx = attributeNames.index("Alcohol")
@@ -21,13 +21,15 @@ y_est = model.predict(X)
 residual = y_est - y
 
 # Display scatter plot
-figure()
-subplot(2, 1, 1)
-plot(y, y_est, ".")
-xlabel("Alcohol content (true)")
-ylabel("Alcohol content (estimated)")
-subplot(2, 1, 2)
-hist(residual, 40)
+fig, ax = subplots(nrows=2, ncols=1)
+ax[0].plot(y, y_est, ".")
+ax[0].set_xlabel("Alcohol content (true)")
+ax[0].set_ylabel("Alcohol content (estimated)")
+
+ax[1].hist(residual, 40)
+ax[1].set_xlabel("Residual")
+ax[1].set_ylabel("Count")
+tight_layout()
 show()
 
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
index 8e44343b6b81236db68eb2642651834cbeaaca69..c7ad3fdbca3e78950bc997dff1be9b7bc6d3e681 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
@@ -3,7 +3,7 @@ import sklearn.linear_model as lm
 
 # requires data from exercise 5.1.4
 from ex5_1_5 import *
-from matplotlib.pylab import figure, hist, plot, show, subplot, xlabel, ylabel
+from matplotlib.pylab import figure, hist, plot, show, subplot, xlabel, ylabel, tight_layout
 
 # Split dataset into features and target vector
 alcohol_idx = attributeNames.index("Alcohol")
@@ -54,6 +54,7 @@ plot(Xfava, residual, ".r")
 xlabel("Fixed*Volatile Acidity")
 ylabel("Residual")
 
+tight_layout()
 show()
 
 print("Ran Exercise 5.2.5")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
index 64f7350e3bec2f9fe166dc3c74b2df93e6e7e244..e59ce50b0ff15ec7072eac9ddd077a413a3422ba 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
@@ -2,7 +2,7 @@
 import importlib_resources
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pyplot import clim, figure, plot, show, subplot, title, xlabel, ylabel
+from matplotlib.pyplot import clim, figure, plot, show, subplot, title, xlabel, ylabel, subplots, suptitle
 from scipy.io import loadmat
 from sklearn import model_selection
 
@@ -117,13 +117,18 @@ else:
     y_est= m.predict(X[:,ff])
     residual=y-y_est
 
-    figure(k+1, figsize=(12,6))
-    title('Residual error vs. Attributes for features selected in cross-validation fold {0}'.format(f))
+    # Determine the number of rows and columns for subplots
+    nrows = int(len(ff) ** 0.5)
+    ncols = int(len(ff) / nrows) + (len(ff) % nrows > 0)
+
+    fig, ax = subplots(nrows=nrows, ncols=ncols, num=k+1, figsize=(12,6))
+    ax = ax.flatten()
+
+    suptitle('Residual error vs. Attributes for features selected in cross-validation fold {0}'.format(f))
     for i in range(0,len(ff)):
-        subplot(2, int( np.ceil(len(ff)/2)), i+1)
-        plot(X[:,ff[i]],residual,'.')
-        xlabel(attributeNames[ff[i]])
-        ylabel('residual error')
+        ax[i].plot(X[:,ff[i]],residual,'.')
+        ax[i].set_xlabel(attributeNames[ff[i]])
+        ax[i].set_ylabel('residual error')
 
 
 show()
diff --git a/exercises/02450Toolbox_Python/Scripts/ex8_2_2.py b/exercises/02450Toolbox_Python/Scripts/ex8_2_2.py
index 55b34a37765dc9e48b1e14da52f2f99406c46eda..70f5b661cd0ceeff01c1c2d93562ecaf8f158224 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex8_2_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex8_2_2.py
@@ -149,6 +149,7 @@ summaries_axes[1].set_xlabel("Fold")
 summaries_axes[1].set_xticks(np.arange(1, K + 1))
 summaries_axes[1].set_ylabel("Error rate")
 summaries_axes[1].set_title("Test misclassification rates")
+plt.tight_layout()
 
 # Show the plots
 # plt.show(decision_boundaries.number) # try these lines if the following code fails (depends on package versions)
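
Not part of the patch above: a minimal standalone sketch of the pattern these hunks apply throughout, i.e. replacing state-machine subplot()/xlabel()/ylabel() calls with the object-oriented subplots()/Axes API plus tight_layout(), including the grid-sizing arithmetic from the ex6_2_1.py hunk. The data, feature count, and labels below are illustrative assumptions only, not taken from the exercises.

import numpy as np
import matplotlib.pyplot as plt

# Illustrative stand-ins for the selected attributes and the regression residual.
rng = np.random.default_rng(0)
n_features = 5
X_sel = rng.standard_normal((100, n_features))
residual = rng.standard_normal(100)

# Grid sizing as in the ex6_2_1.py hunk: nrows is roughly the square root of the
# panel count, ncols rounds the rest up so that nrows * ncols >= n_features.
nrows = int(n_features ** 0.5)
ncols = int(n_features / nrows) + (n_features % nrows > 0)

fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(12, 6))
ax = np.atleast_1d(ax).flatten()  # handles the 1x1 case, where subplots() returns a single Axes

for i in range(n_features):
    ax[i].plot(X_sel[:, i], residual, ".")
    ax[i].set_xlabel("attribute {0}".format(i))
    ax[i].set_ylabel("residual error")

fig.suptitle("Residual error vs. attributes")
plt.tight_layout()
plt.show()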