diff --git a/exercises/02450Toolbox_Python/Scripts/ex10_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex10_1_1.py
index 798eeb6834f374334b840904054bc4a594fdc3f5..74dfdbac4c336774190af114e54a1723e2939ebb 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex10_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex10_1_1.py
@@ -1,6 +1,6 @@
 # exercise 10.1.1
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.cluster import k_means
 
@@ -24,8 +24,9 @@ K = 4
 centroids, cls, inertia = k_means(X, K)
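+# (k_means returns the fitted cluster centers, each point's cluster index and the total inertia)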
 
 # Plot results:
-figure(figsize=(14, 9))
+plt.figure(figsize=(14, 9))
 clusterplot(X, cls, centroids, y)
-show()
+plt.show()
 
 print("Ran Exercise 10.1.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex10_1_3.py b/exercises/02450Toolbox_Python/Scripts/ex10_1_3.py
index a427cac0a34cafd553024aad1d0263d143330238..3c2c04407ebbd7a93659d79fb6e29e1ce4a63548 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex10_1_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex10_1_3.py
@@ -1,7 +1,7 @@
 # exercise 10.1.3
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import figure, legend, plot, show, title, ylim
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.cluster import k_means
 
@@ -35,12 +35,13 @@ for k in range(K-1):
         
 # Plot results:
 
-figure(1)
-title('Cluster validity')
-plot(np.arange(K-1)+2, Rand)
-plot(np.arange(K-1)+2, Jaccard)
-plot(np.arange(K-1)+2, NMI)
-legend(['Rand', 'Jaccard', 'NMI'], loc=4)
-show()
+plt.figure(1)
+plt.title('Cluster validity')
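+# Plot each validity index against the number of clusters; np.arange(K-1)+2 spans 2..K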
+plt.plot(np.arange(K-1)+2, Rand)
+plt.plot(np.arange(K-1)+2, Jaccard)
+plt.plot(np.arange(K-1)+2, NMI)
+plt.legend(['Rand', 'Jaccard', 'NMI'], loc='lower right')
+plt.show()
 
 print('Ran Exercise 10.1.3')
diff --git a/exercises/02450Toolbox_Python/Scripts/ex10_1_5.py b/exercises/02450Toolbox_Python/Scripts/ex10_1_5.py
index 08e6653fe56d38f593c00541d1fecd9f30aa5187..1fee989a2bd07e02cf2866adaecfed5cac3fa137 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex10_1_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex10_1_5.py
@@ -11,15 +11,14 @@ filename = importlib_resources.files("dtuimldmtools").joinpath("data/wildfaces.m
 mat_data = loadmat(filename)
 
 #filename = importlib_resources.files("dtuimldmtools").joinpath("data/digits.mat") #<-- uncomment this for using the digits dataset 
-#mat_data = loadmat('../Data/digits.mat') #<-- uncomment this for using the digits dataset 
 
 X = mat_data['X']
 N, M = X.shape
+
 # Image resolution and number of colors
 x = 40 #<-- change this for using the digits dataset
 y = 40 #<-- change this for using the digits dataset
-c = 3 #<-- change this for using the digits dataset
-
+c = 3  #<-- change this for using the digits dataset
 
 # Number of clusters:
 K = 10
diff --git a/exercises/02450Toolbox_Python/Scripts/ex10_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex10_2_1.py
index d169e6a7f4aa454c47fc23cf2a5c000ab19aaf7c..f843913258ce4c8b12f80d7409061c25ba84705a 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex10_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex10_2_1.py
@@ -1,6 +1,6 @@
 # exercise 10.2.1
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.cluster.hierarchy import dendrogram, fcluster, linkage
 from scipy.io import loadmat
 
@@ -27,16 +27,16 @@ Z = linkage(X, method=Method, metric=Metric)
 # Compute and display clusters by thresholding the dendrogram
 Maxclust = 4
 cls = fcluster(Z, criterion="maxclust", t=Maxclust)
-figure(1)
+plt.figure(1)
 clusterplot(X, cls.reshape(cls.shape[0], 1), y=y)
 
 # Display dendrogram
 max_display_levels = 6
-figure(2, figsize=(10, 4))
+plt.figure(2, figsize=(10, 4))
 dendrogram(
     Z, truncate_mode="level", p=max_display_levels, color_threshold=Z[-Maxclust + 1, 2]
 )
 
-show()
+plt.show()
 
 print("Ran Exercise 10.2.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex11_1_1.py
index 575da6722e7b6882f9958d6472dc6e27086594a1..b411cf317f41f7bfc96f0834124e5827bb3d4ca6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_1_1.py
@@ -1,7 +1,7 @@
 # exercise 11.1.1
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.mixture import GaussianMixture
 
@@ -59,9 +59,9 @@ if cov_type.lower() == "diag":
     covs = new_covs
 
 # Plot results:
-figure(figsize=(14, 9))
+plt.figure(figsize=(14, 9))
 clusterplot(X, clusterid=cls, centroids=cds, y=y, covars=covs)
-show()
+plt.show()
 
-## In case the number of features != 2, then a subset of features most be plotted instead.
-# figure(figsize=(14,9))
+## If the number of features != 2, then a subset of the features must be plotted instead.
+# plt.figure(figsize=(14,9))
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_1_5.py b/exercises/02450Toolbox_Python/Scripts/ex11_1_5.py
index 3f2dfb92a138687dcd0253a825e67aacb8bbb75a..ad5c52cccbfe97baea58d62185acaad255fc7233 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_1_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_1_5.py
@@ -1,7 +1,7 @@
 # exercise 11.1.5
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import figure, legend, plot, show, xlabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection
 from sklearn.mixture import GaussianMixture
@@ -68,12 +68,13 @@ for t, K in enumerate(KRange):
 
 # Plot results
 
-figure(1)
-plot(KRange, BIC, "-*b")
-plot(KRange, AIC, "-xr")
-plot(KRange, 2 * CVE, "-ok")
-legend(["BIC", "AIC", "Crossvalidation"])
-xlabel("K")
-show()
+plt.figure(1)
+plt.plot(KRange, BIC, "-*b")
+plt.plot(KRange, AIC, "-xr")
+plt.plot(KRange, 2 * CVE, "-ok")
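+# 2*CVE is plotted, presumably to match the -2*log-likelihood scale of BIC and AIC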
+plt.legend(["BIC", "AIC", "Crossvalidation"])
+plt.xlabel("K")
+plt.show()
 
 print("Ran Exercise 11.1.5")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex11_2_1.py
index f7b821cd784ccfdd11125e81c44352177fee2b0b..cb98bc2558bd440aeb44e1f6b28e0adde989d755 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_2_1.py
@@ -1,6 +1,6 @@
 # exercise 11_2_1
 import numpy as np
-from matplotlib.pyplot import figure, hist, show
+import matplotlib.pyplot as plt
 
 # Number of data objects
 N = 1000
@@ -27,8 +27,8 @@ for c_id, c_size in enumerate(c_sizes):
 
 
 # Plot histogram of sampled data
-figure()
-hist(X, x)
-show()
+plt.figure()
+plt.hist(X, x)
+plt.show()
 
 print("Ran Exercise 11.2.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_2_2.py b/exercises/02450Toolbox_Python/Scripts/ex11_2_2.py
index c9007f93b3fab95c7bdfbb24cfd754e4eae5fee8..754aef4f327832c9cfc581bef9b88d539d43a613 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_2_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_2_2.py
@@ -1,6 +1,6 @@
 # exercise 11.2.2
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+import matplotlib.pyplot as plt
-from scipy.stats.kde import gaussian_kde
+from scipy.stats import gaussian_kde
 
 # Draw samples from mixture of gaussians (as in exercise 11.1.1)
@@ -24,13 +24,14 @@ xe = np.linspace(-10, 10, 100)
 kde = gaussian_kde(X.ravel())
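+# (gaussian_kde picks its bandwidth automatically, using Scott's rule by default)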
 
 # Plot kernel density estimate
-figure(figsize=(6, 7))
-subplot(2, 1, 1)
-hist(X, x)
-title("Data histogram")
-subplot(2, 1, 2)
-plot(xe, kde.evaluate(xe))
-title("Kernel density estimate")
-show()
+plt.figure(figsize=(6, 7))
+plt.subplot(2, 1, 1)
+plt.hist(X, x)
+plt.title("Data histogram")
+plt.subplot(2, 1, 2)
+plt.plot(xe, kde.evaluate(xe))
+plt.title("Kernel density estimate")
+plt.show()
 
 print("Ran Exercise 11.2.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_2_3.py b/exercises/02450Toolbox_Python/Scripts/ex11_2_3.py
index 0119997e3a3f55e5a352d74f07ae7a7e1f605514..d7024395c0b8423081e092d17e19f042a64b86e3 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_2_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_2_3.py
@@ -1,6 +1,6 @@
 # exercise 11.2.3
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+import matplotlib.pyplot as plt
 from sklearn.neighbors import NearestNeighbors
 
 # Draw samples from mixture of gaussians (as in exercise 11.1.1)
@@ -39,22 +39,24 @@ knn_avg_rel_density = knn_density / (knn_densityX[i[:, 1:]].sum(axis=1) / K)
 
 
 # Plot KNN density
-figure(figsize=(6, 7))
-subplot(2, 1, 1)
-hist(X, x)
-title("Data histogram")
-subplot(2, 1, 2)
-plot(xe, knn_density)
-title("KNN density")
+plt.figure(figsize=(6, 7))
+plt.subplot(2, 1, 1)
+plt.hist(X, x)
+plt.title("Data histogram")
+plt.subplot(2, 1, 2)
+plt.plot(xe, knn_density)
+plt.title("KNN density")
+
 # Plot KNN average relative density
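+# (each point's density divided by the mean density of its K nearest neighbours)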
-figure(figsize=(6, 7))
-subplot(2, 1, 1)
-hist(X, x)
-title("Data histogram")
-subplot(2, 1, 2)
-plot(xe, knn_avg_rel_density)
-title("KNN average relative density")
-
-show()
+plt.figure(figsize=(6, 7))
+plt.subplot(2, 1, 1)
+plt.hist(X, x)
+plt.title("Data histogram")
+plt.subplot(2, 1, 2)
+plt.plot(xe, knn_avg_rel_density)
+plt.title("KNN average relative density")
+
+plt.show()
 
 print("Ran Exercise 11.2.3")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_3_1.py b/exercises/02450Toolbox_Python/Scripts/ex11_3_1.py
index 92b652a956db13981ae4398fcec15e4cdc62c078..8795f596d4cd7181cff9b573810f0ed81c1cbb8b 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_3_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_3_1.py
@@ -1,6 +1,6 @@
 # exercise 11.3.1
 import numpy as np
-from matplotlib.pyplot import bar, figure, show, title
+import matplotlib.pyplot as plt
-from scipy.stats.kde import gaussian_kde
+from scipy.stats import gaussian_kde
 
 # Draw samples from mixture of gaussians (as in exercise 11.1.1), add outlier
@@ -28,9 +28,9 @@ scores.sort()
 print("The index of the lowest density object: {0}".format(idx[0]))
 
 # Plot kernel density estimate
-figure()
-bar(range(20), scores[:20])
-title("Outlier score")
-show()
+plt.figure()
+plt.bar(range(20), scores[:20])
+plt.title("Outlier score")
+plt.show()
 
 print("Ran Exercise 11.3.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_3_2.py b/exercises/02450Toolbox_Python/Scripts/ex11_3_2.py
index 8b3d919fa92feff3cc6690fa9db987028e72181d..72bf9ffa366a8ee0a32b218ab8126acfbc1c1190 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_3_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_3_2.py
@@ -1,7 +1,7 @@
 # exercise 11.3.2
 
 import numpy as np
-from matplotlib.pyplot import bar, figure, plot, show, title
+import matplotlib.pyplot as plt
 
 from dtuimldmtools import gausKernelDensity
 
@@ -43,17 +43,18 @@ density = density[i]
 print("Lowest density: {0} for data object: {1}".format(density[0], i[0]))
 
 # Plot density estimate of outlier score
-figure(1)
-bar(
+plt.figure(1)
+plt.bar(
     range(20),
     density[:20].reshape(
         -1,
     ),
 )
-title("Density estimate")
-figure(2)
-plot(logP)
-title("Optimal width")
-show()
+plt.title("Density estimate")
+
+plt.figure(2)
+plt.plot(logP)
+plt.title("Optimal width")
+plt.show()
 
 print("Ran Exercise 11.3.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex11_4_1.py b/exercises/02450Toolbox_Python/Scripts/ex11_4_1.py
index 7edc22bc819f81c44826c9ffede44a23261b1cff..a91319f97c5c1b8cae5944e204a208b43cfbf1d6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex11_4_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex11_4_1.py
@@ -1,17 +1,7 @@
 # exercise 11.4.1
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import (
-    bar,
-    cm,
-    figure,
-    imshow,
-    show,
-    subplot,
-    title,
-    xticks,
-    yticks,
-)
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.neighbors import NearestNeighbors
 
@@ -56,19 +46,20 @@ density = density[i].reshape(
 )
 
 # Plot density estimate of outlier score
-figure(1)
-bar(range(20), density[:20])
-title("Density estimate")
+plt.figure(1)
+plt.bar(range(20), density[:20])
+plt.title("Density estimate")
 
 # Plot possible outliers
-figure(2)
+plt.figure(2)
 for k in range(1, 21):
-    subplot(4, 5, k)
-    imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=cm.binary)
-    xticks([])
-    yticks([])
+    plt.subplot(4, 5, k)
+    plt.imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=plt.cm.binary)
+    plt.xticks([])
+    plt.yticks([])
     if k == 3:
-        title("Gaussian Kernel Density: Possible outliers")
+        plt.title("Gaussian Kernel Density: Possible outliers")
 
 
 ### K-neighbors density estimator
@@ -94,18 +84,19 @@ i = dens.argsort()
 dens = dens[i]
 
 # Plot k-neighbor estimate of outlier score (distances)
-figure(3)
-bar(range(20), dens[:20])
-title("KNN density: Outlier score")
+plt.figure(3)
+plt.bar(range(20), dens[:20])
+plt.title("KNN density: Outlier score")
+
 # Plot possible outliers
-figure(4)
+plt.figure(4)
 for k in range(1, 21):
-    subplot(4, 5, k)
-    imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=cm.binary)
-    xticks([])
-    yticks([])
+    plt.subplot(4, 5, k)
+    plt.imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=plt.cm.binary)
+    plt.xticks([])
+    plt.yticks([])
     if k == 3:
-        title("KNN density: Possible outliers")
+        plt.title("KNN density: Possible outliers")
 
 
 ### K-nearest neigbor average relative density
@@ -123,18 +114,18 @@ i_avg_rel = avg_rel_density.argsort()
 avg_rel_density = avg_rel_density[i_avg_rel]
 
 # Plot k-neighbor estimate of outlier score (distances)
-figure(5)
-bar(range(20), avg_rel_density[:20])
-title("KNN average relative density: Outlier score")
+plt.figure(5)
+plt.bar(range(20), avg_rel_density[:20])
+plt.title("KNN average relative density: Outlier score")
 # Plot possible outliers
-figure(6)
+plt.figure(6)
 for k in range(1, 21):
-    subplot(4, 5, k)
-    imshow(np.reshape(X[i_avg_rel[k], :], (16, 16)).T, cmap=cm.binary)
-    xticks([])
-    yticks([])
+    plt.subplot(4, 5, k)
+    plt.imshow(np.reshape(X[i_avg_rel[k], :], (16, 16)).T, cmap=plt.cm.binary)
+    plt.xticks([])
+    plt.yticks([])
     if k == 3:
-        title("KNN average relative density: Possible outliers")
+        plt.title("KNN average relative density: Possible outliers")
 
 ### Distance to 5'th nearest neighbor outlier score
 K = 5
@@ -150,29 +141,30 @@ i = score.argsort()
 score = score[i[::-1]]
 
 # Plot k-neighbor estimate of outlier score (distances)
-figure(7)
-bar(range(20), score[:20])
-title("5th neighbor distance: Outlier score")
+plt.figure(7)
+plt.bar(range(20), score[:20])
+plt.title("5th neighbor distance: Outlier score")
+
 # Plot possible outliers
-figure(8)
+plt.figure(8)
 for k in range(1, 21):
-    subplot(4, 5, k)
-    imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=cm.binary)
-    xticks([])
-    yticks([])
+    plt.subplot(4, 5, k)
+    plt.imshow(np.reshape(X[i[k], :], (16, 16)).T, cmap=plt.cm.binary)
+    plt.xticks([])
+    plt.yticks([])
     if k == 3:
-        title("5th neighbor distance: Possible outliers")
+        plt.title("5th neighbor distance: Possible outliers")
 
 
 # Plot random digits (the first 20 in the data set), for comparison
-figure(9)
+plt.figure(9)
 for k in range(1, 21):
-    subplot(4, 5, k)
-    imshow(np.reshape(X[k, :], (16, 16)).T, cmap=cm.binary)
-    xticks([])
-    yticks([])
+    plt.subplot(4, 5, k)
+    plt.imshow(np.reshape(X[k, :], (16, 16)).T, cmap=plt.cm.binary)
+    plt.xticks([])
+    plt.yticks([])
     if k == 3:
-        title("Random digits from data set")
-show()
+        plt.title("Random digits from data set")
+plt.show()
 
 print("Ran Exercise 11.4.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex1_5_1.py b/exercises/02450Toolbox_Python/Scripts/ex1_5_1.py
index 331faf5803a97830ee9d7cecf781e9791a653416..a5c04165d9633f201329cb867a667621de801849 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex1_5_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex1_5_1.py
@@ -80,3 +80,5 @@ N, M = X.shape
 # Finally, the last variable that we need to have the dataset in the
 # "standard representation" for the course, is the number of classes, C:
 C = len(classNames)
+
+print("Ran 1.5.1 -- loaded the Iris data")
\ No newline at end of file
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
index 94645345f920ecd84f7bd19453a934aa671f2729..ec2f3488eeca721c9cc8a84dd0e2c22f61a151af 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_1.py
@@ -1,7 +1,6 @@
 # exercise 4.1.1
-
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+import matplotlib.pyplot as plt
 
 # Number of samples
 N = 200
@@ -21,10 +20,10 @@ X = np.random.normal(mu, s, N).T
 X = np.random.randn(N).T * s + mu
 
 # Plot the samples and histogram
-figure(figsize=(12, 4))
-title("Normal distribution")
-subplot(1, 2, 1)
-plot(X, ".")
-subplot(1, 3, 3)
-hist(X, bins=nbins)
-show()
+plt.figure(figsize=(12, 4))
+plt.title("Normal distribution")
+plt.subplot(1, 2, 1)
+plt.plot(X, ".")
+plt.subplot(1, 2, 2)
+plt.hist(X, bins=nbins)
+plt.show()
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_2.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_2.py
index df357b391150c468784368654d0665c891415f02..9f613d9590de7e7543594708599b41a9fba0e2ea 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_2.py
@@ -1,7 +1,7 @@
 # exercise 4.1.2
 
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+import matplotlib.pyplot as plt
 
 # Number of samples
 N = 200
@@ -20,13 +20,6 @@ X = np.random.normal(mu, s, N).T
 # or equally:
 X = np.random.randn(N).T * s + mu
 
-# Plot the samples and histogram
-figure()
-title("Normal distribution")
-subplot(1, 2, 1)
-plot(X, "x")
-subplot(1, 2, 2)
-hist(X, bins=nbins)
 
 # Compute empirical mean and standard deviation
 mu_ = X.mean()
@@ -37,5 +30,12 @@ print("Theoretical std.dev.: ", s)
 print("Empirical mean: ", mu_)
 print("Empirical std.dev.: ", s_)
 
-show()
+# Plot the samples and histogram
+plt.figure()
+plt.title("Normal distribution")
+plt.subplot(1, 2, 1)
+plt.plot(X, "x")
+plt.subplot(1, 2, 2)
+plt.hist(X, bins=nbins)
+plt.show()
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_3.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_3.py
index 2d45b0111632b202beeeb3fe951775a002a25827..0a6c637b2aeda368d26b7470be3dbf9f44d47545 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_3.py
@@ -1,7 +1,6 @@
 # exercise 4.1.3
-
 import numpy as np
-from matplotlib.pyplot import figure, hist, plot, show, subplot, title
+import matplotlib.pyplot as plt
 from scipy import stats
 
 # Number of samples
@@ -22,14 +21,14 @@ X = np.random.normal(mu, s, N).T
 X = np.random.randn(N).T * s + mu
 
 # Plot the histogram
-f = figure()
-title("Normal distribution")
-hist(X, bins=nbins, density=True)
+f = plt.figure()
+plt.title("Normal distribution")
+plt.hist(X, bins=nbins, density=True)
 
 # Over the histogram, plot the theoretical probability distribution function:
 x = np.linspace(X.min(), X.max(), 1000)
 pdf = stats.norm.pdf(x, loc=17, scale=2)
-plot(x, pdf, ".", color="red")
+plt.plot(x, pdf, ".", color="red")
 
 # Compute empirical mean and standard deviation
 mu_ = X.mean()
@@ -40,5 +39,5 @@ print("Theoretical std.dev.: ", s)
 print("Empirical mean: ", mu_)
 print("Empirical std.dev.: ", s_)
 
-show()
+plt.show()
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_5.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_5.py
index 09bbc68c4770d92bc5be5b5f7d95e9d47f8a7692..d364cc8f672a4f9d674f67cf5d05453559ab953d 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_5.py
@@ -1,22 +1,7 @@
 # exercise 4.1.5
 
 import numpy as np
-from matplotlib.pyplot import (
-    cm,
-    colorbar,
-    figure,
-    hist,
-    imshow,
-    plot,
-    show,
-    subplot,
-    suptitle,
-    title,
-    xlabel,
-    xticks,
-    ylabel,
-    yticks,
-)
+import matplotlib.pyplot as plt
 
 # Number of samples
 N = 1000
@@ -44,25 +29,26 @@ X = np.random.multivariate_normal(mu, S, N)
 
 
 # Plot scatter plot of data
-figure(figsize=(12, 8))
-suptitle("2-D Normal distribution")
+plt.figure(figsize=(12, 8))
+plt.suptitle("2-D Normal distribution")
 
-subplot(1, 2, 1)
-plot(X[:, 0], X[:, 1], "x")
-xlabel("x1")
-ylabel("x2")
-title("Scatter plot of data")
+plt.subplot(1, 2, 1)
+plt.plot(X[:, 0], X[:, 1], "x")
+plt.xlabel("x1")
+plt.ylabel("x2")
+plt.title("Scatter plot of data")
 
-subplot(1, 2, 2)
+plt.subplot(1, 2, 2)
 x = np.histogram2d(X[:, 0], X[:, 1], nbins)
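+# (x[0] holds the 2-D bin counts; x[1] and x[2] are the bin edges)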
-imshow(x[0], cmap=cm.gray_r, interpolation="None", origin="lower")
-colorbar()
-xlabel("x1")
-ylabel("x2")
-xticks([])
-yticks([])
-title("2D histogram")
-
-show()
+plt.imshow(x[0], cmap=plt.cm.gray_r, interpolation="None", origin="lower")
+plt.colorbar()
+plt.xlabel("x1")
+plt.ylabel("x2")
+plt.xticks([])
+plt.yticks([])
+plt.title("2D histogram")
+
+plt.show()
 
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_6.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_6.py
index e20bf0fc5ecc75bd9f0154cee197ea34c8b6c066..11ce3af61ed99a4c271869455f2a8ee5c368958f 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_6.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_6.py
@@ -1,9 +1,8 @@
 # exercise 4.1.6
-
 import importlib_resources
 import numpy as np
 import scipy.linalg as linalg
-from matplotlib.pyplot import cm, figure, imshow, show, subplot, title, xticks, yticks
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 
 filename = importlib_resources.files("dtuimldmtools").joinpath("data/zipdata.mat")
@@ -31,19 +30,19 @@ mu = X.mean(axis=0)
 s = X.std(ddof=1, axis=0)
 S = np.cov(X, rowvar=0, ddof=1)
 
-figure()
-subplot(1, 2, 1)
+plt.figure()
+plt.subplot(1, 2, 1)
 I = np.reshape(mu, (16, 16))
-imshow(I, cmap=cm.gray_r)
-title("Mean")
-xticks([])
-yticks([])
-subplot(1, 2, 2)
+plt.imshow(I, cmap=plt.cm.gray_r)
+plt.title("Mean")
+plt.xticks([])
+plt.yticks([])
+plt.subplot(1, 2, 2)
 I = np.reshape(s, (16, 16))
-imshow(I, cmap=cm.gray_r)
-title("Standard deviation")
-xticks([])
-yticks([])
+plt.imshow(I, cmap=plt.cm.gray_r)
+plt.title("Standard deviation")
+plt.xticks([])
+plt.yticks([])
 
-show()
+plt.show()
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex4_1_7.py b/exercises/02450Toolbox_Python/Scripts/ex4_1_7.py
index 4a1ef5fae30db45fdbcc56c346be6277e0dabdf6..f9d62f0844155350c01867801ecf1e5312429a73 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex4_1_7.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex4_1_7.py
@@ -2,7 +2,7 @@
 
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import cm, figure, imshow, show, subplot, title, xticks, yticks
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 
 filename = importlib_resources.files("dtuimldmtools").joinpath("data/zipdata.mat")
@@ -39,15 +39,15 @@ for i in range(ngen):
     Xgen[i] = np.multiply(Xgen[i], s) + mu
 
 # Plot images
-figure()
+plt.figure()
 for k in range(ngen):
-    subplot(2, int(np.ceil(ngen / 2.0)), k + 1)
+    plt.subplot(2, int(np.ceil(ngen / 2.0)), k + 1)
     I = np.reshape(Xgen[k, :], (16, 16))
-    imshow(I, cmap=cm.gray_r)
-    xticks([])
-    yticks([])
+    plt.imshow(I, cmap=plt.cm.gray_r)
+    plt.xticks([])
+    plt.yticks([])
     if k == 1:
-        title("Digits: 1-D Normal")
+        plt.title("Digits: 1-D Normal")
 
 
 # Generate 10 samples from multivariate normal distribution
@@ -58,15 +58,15 @@ Xmvgen = np.random.multivariate_normal(mu, S, ngen)
 
 
 # Plot images
-figure()
+plt.figure()
 for k in range(ngen):
-    subplot(2, int(np.ceil(ngen / 2.0)), k + 1)
+    plt.subplot(2, int(np.ceil(ngen / 2.0)), k + 1)
     I = np.reshape(Xmvgen[k, :], (16, 16))
-    imshow(I, cmap=cm.gray_r)
-    xticks([])
-    yticks([])
+    plt.imshow(I, cmap=plt.cm.gray_r)
+    plt.xticks([])
+    plt.yticks([])
     if k == 1:
-        title("Digits: Multivariate Normal")
+        plt.title("Digits: Multivariate Normal")
 
-show()
+plt.show()
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_1_2.py b/exercises/02450Toolbox_Python/Scripts/ex5_1_2.py
index dc3bcc42b52a6e36dc6a86fb23bbcb338899f270..9c1a724648722004bbecad2bb6106e59b6c9a5c8 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_1_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_1_2.py
@@ -1,7 +1,6 @@
 # exercise 5.1.2
 from os import getcwd
 from platform import system
-
 import matplotlib.pyplot as plt
 import numpy as np
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_1_3.py b/exercises/02450Toolbox_Python/Scripts/ex5_1_3.py
index afd780e7b5a8c32cb2d61c9522010896e8361128..05089b8c5010a4fc9c66b03545ddf972d31dc94b 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_1_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_1_3.py
@@ -2,18 +2,13 @@
 import os
 from os import getcwd
 from platform import system
-
 import matplotlib.pyplot as plt
 import numpy as np
-
-# requires data from exercise 5.1.1
-from ex5_1_1 import *
 from matplotlib.image import imread
 from sklearn import tree
 
-# import graphviz
-# import pydotplus
-
+# requires data from exercise 5.1.1
+from ex5_1_1 import *
 
 # Fit regression tree classifier, Gini split criterion, no pruning
 criterion = "gini"
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_1_5.py b/exercises/02450Toolbox_Python/Scripts/ex5_1_5.py
index 9ce89d96a6422ddf4ff3813baf4775c3fc72411f..3beff795dcac11612d31847d00c62932ff638971 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_1_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_1_5.py
@@ -1,15 +1,15 @@
 # exercise 5.1.5
 import os
-
 import importlib_resources
 import numpy as np
 from scipy.io import loadmat
 
-filename = importlib_resources.files("dtuimldmtools").joinpath("data/wine.mat")
 # Load Matlab data file and extract variables of interest
+filename = importlib_resources.files("dtuimldmtools").joinpath("data/wine.mat")
 workingDir = os.getcwd()
 print("Running from: " + workingDir)
 
+# Pick the relevant variables
 mat_data = loadmat(filename)
 X = mat_data["X"]
 y = mat_data["y"].astype(int).squeeze()
@@ -20,7 +20,6 @@ N = mat_data["N"][0, 0]
 attributeNames = [i[0][0] for i in mat_data["attributeNames"]]
 classNames = [j[0] for i in mat_data["classNames"] for j in i]
 
-
 # Remove outliers
 outlier_mask = (X[:, 1] > 20) | (X[:, 7] > 10) | (X[:, 10] > 200)
 valid_mask = np.logical_not(outlier_mask)
@@ -32,4 +31,4 @@ attributeNames = attributeNames[0:11]
 # Update N and M
 N, M = X.shape
 
-print("Ran Exercise 5.1.5")
+print("Ran Exercise 5.1.5 - loading the Wine data")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_1_6.py b/exercises/02450Toolbox_Python/Scripts/ex5_1_6.py
index 6e5902032b4c4616caec4e613fa156b7f150243a..5a3292e360af36ce9a50d6882d284568285f68a6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_1_6.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_1_6.py
@@ -1,14 +1,13 @@
 # exercise 5.1.6
 from os import getcwd
 from platform import system
-
 import matplotlib.pyplot as plt
 import numpy as np
+from matplotlib.image import imread
+from sklearn import tree
 
 # requires data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.image import imread
-from sklearn import tree
 
 # Fit classification tree using, Gini split criterion, no pruning
 criterion = "gini"
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_1.py
index f78c82ce7a010322a1005d0650fb1c574e60338c..d55ea494cf9a0d5f6bd4054ea6b37f1cee9fea78 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_1.py
@@ -1,7 +1,6 @@
 # exercise 5.2.1
-
 import numpy as np
-from matplotlib.pyplot import figure, plot, show, title, xlabel, ylabel
+import matplotlib.pyplot as plt
 
 # Number of data objects
 N = 100
@@ -21,12 +20,11 @@ w1 = 0.01
 y = w0 + w1 * X + eps
 
 # Make a scatter plot
-figure()
-plot(X, y, "o")
-xlabel("X")
-ylabel("y")
-title("Illustration of a linear relation with noise")
-
-show()
+plt.figure()
+plt.plot(X, y, "o")
+plt.xlabel("X")
+plt.ylabel("y")
+plt.title("Illustration of a linear relation with noise")
+plt.show()
 
 print("Ran Exercise 5.2.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_2.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_2.py
index 686fcc14c646a6a764228ec0aeb664dbe031d2ab..8e61ba7e8a65900f06f9aba39677f23270e66436 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_2.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pyplot import figure, legend, plot, show, xlabel, ylabel
+import matplotlib.pyplot as plt
 
 # Use dataset as in the previous exercise
 N = 100
@@ -24,15 +24,15 @@ y_est = model.predict(X)
 
 
 # Plot original data and the model output
-f = figure()
+f = plt.figure()
 
-plot(X, y, ".")
-plot(X, y_true, "-")
-plot(X, y_est, "-")
-xlabel("X")
-ylabel("y")
-legend(["Training data", "Data generator", "Regression fit (model)"])
+plt.plot(X, y, ".")
+plt.plot(X, y_true, "-")
+plt.plot(X, y_est, "-")
+plt.xlabel("X")
+plt.ylabel("y")
+plt.legend(["Training data", "Data generator", "Regression fit (model)"])
 
-show()
+plt.show()
 
 print("Ran Exercise 5.2.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_3.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_3.py
index 172a1ced9414d5466ecf046135fefee707d6e726..7bbc8f13ba1bb09b0966d247c6c2d129059603fc 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_3.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pyplot import figure, legend, plot, show, xlabel, ylabel, ylim
+import matplotlib.pyplot as plt
 
 # Parameters
 Kd = 5  # no of terms for data generator
@@ -36,14 +36,14 @@ Xme = np.power(Xe, range(1, Km + 1))
 y_est = model.predict(Xme)
 
 # Plot original data and the model output
-f = figure()
-plot(X, y, ".")
-plot(Xe, y_true, "-")
-plot(Xe, y_est, "-")
-xlabel("X")
-ylabel("y")
-ylim(-2, 8)
-legend(
+f = plt.figure()
+plt.plot(X, y, ".")
+plt.plot(Xe, y_true, "-")
+plt.plot(Xe, y_est, "-")
+plt.xlabel("X")
+plt.ylabel("y")
+plt.ylim(-2, 8)
+plt.legend(
     [
         "Training data",
         "Data generator K={0}".format(Kd),
@@ -51,6 +51,6 @@ legend(
     ]
 )
 
-show()
+plt.show()
 
 print("Ran Exercise 5.2.3")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
index 0790af6dbc80fc6c366a257ea8d2c4c0eddc6e9c..9373768c5c39fad76a9a00e8aca0d6e031a9d770 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_4.py
@@ -1,9 +1,9 @@
 # exercise 5.2.4
 import sklearn.linear_model as lm
+import matplotlib.pyplot as plt
 
 # requires wine data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.pylab import figure, hist, plot, show, subplot, xlabel, ylabel
 
 # Split dataset into features and target vector
 alcohol_idx = attributeNames.index("Alcohol")
@@ -21,14 +21,14 @@ y_est = model.predict(X)
 residual = y_est - y
 
 # Display scatter plot
-figure()
-subplot(2, 1, 1)
-plot(y, y_est, ".")
-xlabel("Alcohol content (true)")
-ylabel("Alcohol content (estimated)")
-subplot(2, 1, 2)
-hist(residual, 40)
-
-show()
+plt.figure()
+plt.subplot(2, 1, 1)
+plt.plot(y, y_est, ".")
+plt.xlabel("Alcohol content (true)")
+plt.ylabel("Alcohol content (estimated)")
+plt.subplot(2, 1, 2)
+plt.hist(residual, 40)
+
+plt.show()
 
 print("Ran Exercise 5.2.4")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
index 8e44343b6b81236db68eb2642651834cbeaaca69..067537c8bf6a8aebdca690b0adb6cc1b3a2c09f7 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_5.py
@@ -3,7 +3,7 @@ import sklearn.linear_model as lm
 
-# requires data from exercise 5.1.4
+# requires data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.pylab import figure, hist, plot, show, subplot, xlabel, ylabel
+import matplotlib.pyplot as plt
 
 # Split dataset into features and target vector
 alcohol_idx = attributeNames.index("Alcohol")
@@ -29,31 +29,31 @@ y_est = model.predict(X)
 residual = y_est - y
 
 # Display plots
-figure(figsize=(12, 8))
+plt.figure(figsize=(12, 8))
 
-subplot(2, 1, 1)
-plot(y, y_est, ".g")
-xlabel("Alcohol content (true)")
-ylabel("Alcohol content (estimated)")
+plt.subplot(2, 1, 1)
+plt.plot(y, y_est, ".g")
+plt.xlabel("Alcohol content (true)")
+plt.ylabel("Alcohol content (estimated)")
 
-subplot(4, 1, 3)
-hist(residual, 40)
+plt.subplot(4, 1, 3)
+plt.hist(residual, 40)
 
-subplot(4, 3, 10)
-plot(Xfa2, residual, ".r")
-xlabel("Fixed Acidity ^2")
-ylabel("Residual")
+plt.subplot(4, 3, 10)
+plt.plot(Xfa2, residual, ".r")
+plt.xlabel("Fixed Acidity ^2")
+plt.ylabel("Residual")
 
-subplot(4, 3, 11)
-plot(Xva2, residual, ".r")
-xlabel("Volatile Acidity ^2")
-ylabel("Residual")
+plt.subplot(4, 3, 11)
+plt.plot(Xva2, residual, ".r")
+plt.xlabel("Volatile Acidity ^2")
+plt.ylabel("Residual")
 
-subplot(4, 3, 12)
-plot(Xfava, residual, ".r")
-xlabel("Fixed*Volatile Acidity")
-ylabel("Residual")
+plt.subplot(4, 3, 12)
+plt.plot(Xfava, residual, ".r")
+plt.xlabel("Fixed*Volatile Acidity")
+plt.ylabel("Residual")
 
-show()
+plt.show()
 
 print("Ran Exercise 5.2.5")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex5_2_6.py b/exercises/02450Toolbox_Python/Scripts/ex5_2_6.py
index bc1af0024eba48832993e2004ba8c44dd7884a27..42cfad10c95355d1f5eb42ec6bfd64e006a889df 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex5_2_6.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex5_2_6.py
@@ -3,7 +3,7 @@ import sklearn.linear_model as lm
 
-# requires data from exercise 5.1.4
+# requires data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.pylab import figure, legend, plot, show, xlabel, ylabel, ylim
+import matplotlib.pyplot as plt
 
 # Fit logistic regression model
 
@@ -26,16 +26,16 @@ misclass_rate = np.sum(y_est != y) / float(len(y_est))
 print("\nProbability of given sample being a white wine: {0:.4f}".format(x_class))
 print("\nOverall misclassification rate: {0:.3f}".format(misclass_rate))
 
-f = figure()
+f = plt.figure()
 class0_ids = np.nonzero(y == 0)[0].tolist()
-plot(class0_ids, y_est_white_prob[class0_ids], ".y")
+plt.plot(class0_ids, y_est_white_prob[class0_ids], ".y")
 class1_ids = np.nonzero(y == 1)[0].tolist()
-plot(class1_ids, y_est_white_prob[class1_ids], ".r")
-xlabel("Data object (wine sample)")
-ylabel("Predicted prob. of class White")
-legend(["White", "Red"])
-ylim(-0.01, 1.5)
+plt.plot(class1_ids, y_est_white_prob[class1_ids], ".r")
+plt.xlabel("Data object (wine sample)")
+plt.ylabel("Predicted prob. of class White")
+plt.legend(["White", "Red"])
+plt.ylim(-0.01, 1.5)
 
-show()
+plt.show()
 
 print("Ran Exercise 5.2.6")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex6_1_1.py
index 5103b044bbea6b0879452bdb3e41f0b6cf38a101..3ef7510d445b7a3b69e9e30555ad09c443fd62e6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_1_1.py
@@ -2,7 +2,7 @@
 
 import importlib_resources
 import numpy as np
-from matplotlib.pylab import figure, legend, plot, show, xlabel, ylabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection, tree
 
@@ -42,13 +42,13 @@ for i, t in enumerate(tc):
     misclass_rate_train = sum(y_est_train != y_train) / float(len(y_est_train))
     Error_test[i], Error_train[i] = misclass_rate_test, misclass_rate_train
 
-f = figure()
-plot(tc, Error_train * 100)
-plot(tc, Error_test * 100)
-xlabel("Model complexity (max tree depth)")
-ylabel("Error (%)")
-legend(["Error_train", "Error_test"])
+f = plt.figure()
+plt.plot(tc, Error_train * 100)
+plt.plot(tc, Error_test * 100)
+plt.xlabel("Model complexity (max tree depth)")
+plt.ylabel("Error (%)")
+plt.legend(["Error_train", "Error_test"])
 
-show()
+plt.show()
 
 print("Ran Exercise 6.1.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_1_2.py b/exercises/02450Toolbox_Python/Scripts/ex6_1_2.py
index f5250f775e6d12d621a14dd0150913c63cc4d11a..ff9c87c5893ce20178a5f592f627fa90b4f1eeed 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_1_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_1_2.py
@@ -2,7 +2,7 @@
 
 import importlib_resources
 import numpy as np
-from matplotlib.pyplot import boxplot, figure, legend, plot, show, xlabel, ylabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection, tree
 
@@ -48,18 +48,18 @@ for train_index, test_index in CV.split(X):
     k += 1
 
 
-f = figure()
-boxplot(Error_test.T)
-xlabel("Model complexity (max tree depth)")
-ylabel("Test error across CV folds, K={0})".format(K))
+f = plt.figure()
+plt.boxplot(Error_test.T)
+plt.xlabel("Model complexity (max tree depth)")
+plt.ylabel("Test error across CV folds, K={0})".format(K))
 
-f = figure()
-plot(tc, Error_train.mean(1))
-plot(tc, Error_test.mean(1))
-xlabel("Model complexity (max tree depth)")
-ylabel("Error (misclassification rate, CV K={0})".format(K))
-legend(["Error_train", "Error_test"])
+f = plt.figure()
+plt.plot(tc, Error_train.mean(1))
+plt.plot(tc, Error_test.mean(1))
+plt.xlabel("Model complexity (max tree depth)")
+plt.ylabel("Error (misclassification rate, CV K={0})".format(K))
+plt.legend(["Error_train", "Error_test"])
 
-show()
+plt.show()
 
 print("Ran Exercise 6.1.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
index 64f7350e3bec2f9fe166dc3c74b2df93e6e7e244..184c40b399d11301ec426b6b0c641abc74936622 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_2_1.py
@@ -2,7 +2,7 @@
 import importlib_resources
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pyplot import clim, figure, plot, show, subplot, title, xlabel, ylabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection
 
@@ -63,16 +63,16 @@ for train_index, test_index in CV.split(X):
         Error_train_fs[k] = np.square(y_train-m.predict(X_train[:,selected_features])).sum()/y_train.shape[0]
         Error_test_fs[k] = np.square(y_test-m.predict(X_test[:,selected_features])).sum()/y_test.shape[0]
     
-        figure(k)
-        subplot(1,2,1)
-        plot(range(1,len(loss_record)), loss_record[1:])
-        xlabel('Iteration')
-        ylabel('Squared error (crossvalidation)')    
+        plt.figure(k)
+        plt.subplot(1,2,1)
+        plt.plot(range(1,len(loss_record)), loss_record[1:])
+        plt.xlabel('Iteration')
+        plt.ylabel('Squared error (crossvalidation)')
         
-        subplot(1,3,3)
+        plt.subplot(1,3,3)
         bmplot(attributeNames, range(1,features_record.shape[1]), -features_record[:,1:])
-        clim(-1.5,0)
-        xlabel('Iteration')
+        plt.clim(-1.5,0)
+        plt.xlabel('Iteration')
 
     print('Cross validation fold {0}/{1}'.format(k+1,K))
     print('Train indices: {0}'.format(train_index))
@@ -95,12 +95,12 @@ print('- Test error:     {0}'.format(Error_test_fs.mean()))
 print('- R^2 train:     {0}'.format((Error_train_nofeatures.sum()-Error_train_fs.sum())/Error_train_nofeatures.sum()))
 print('- R^2 test:     {0}'.format((Error_test_nofeatures.sum()-Error_test_fs.sum())/Error_test_nofeatures.sum()))
 
-figure(k)
-subplot(1,3,2)
+plt.figure(k)
+plt.subplot(1,3,2)
 bmplot(attributeNames, range(1,Features.shape[1]+1), -Features)
-clim(-1.5,0)
-xlabel('Crossvalidation fold')
-ylabel('Attribute')
+plt.clim(-1.5,0)
+plt.xlabel('Crossvalidation fold')
+plt.ylabel('Attribute')
 
 
 # Inspect selected feature coefficients effect on the entire dataset and
@@ -117,15 +117,15 @@ else:
     y_est= m.predict(X[:,ff])
     residual=y-y_est
     
-    figure(k+1, figsize=(12,6))
-    title('Residual error vs. Attributes for features selected in cross-validation fold {0}'.format(f))
+    plt.figure(k+1, figsize=(12,6))
+    plt.title('Residual error vs. Attributes for features selected in cross-validation fold {0}'.format(f))
     for i in range(0,len(ff)):
-       subplot(2, int( np.ceil(len(ff)/2)), i+1)
-       plot(X[:,ff[i]],residual,'.')
-       xlabel(attributeNames[ff[i]])
-       ylabel('residual error')
+       plt.subplot(2, int( np.ceil(len(ff)/2)), i+1)
+       plt.plot(X[:,ff[i]],residual,'.')
+       plt.xlabel(attributeNames[ff[i]])
+       plt.ylabel('residual error')
     
     
-show()
+plt.show()
 
 print('Ran Exercise 6.2.1')
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_3_1.py b/exercises/02450Toolbox_Python/Scripts/ex6_3_1.py
index f04ee01ab424588250a67e58079b0b199a2b57bf..4a66909c60c316d83c29dc295feff784843abf08 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_3_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_3_1.py
@@ -1,18 +1,6 @@
 # exercise 6.3.1
-
 import importlib_resources
-from matplotlib.pyplot import (
-    colorbar,
-    figure,
-    imshow,
-    plot,
-    show,
-    title,
-    xlabel,
-    xticks,
-    ylabel,
-    yticks,
-)
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.metrics import confusion_matrix
 from sklearn.neighbors import KNeighborsClassifier
@@ -34,11 +22,11 @@ C = len(classNames)
 
 
 # Plot the training data points (color-coded) and test data points.
-figure(1)
+plt.figure(1)
 styles = [".b", ".r", ".g", ".y"]
 for c in range(C):
     class_mask = y_train == c
-    plot(X_train[class_mask, 0], X_train[class_mask, 1], styles[c])
+    plt.plot(X_train[class_mask, 0], X_train[class_mask, 1], styles[c])
 
 
 # K-nearest neighbors
@@ -70,25 +58,26 @@ y_est = knclassifier.predict(X_test)
 styles = ["ob", "or", "og", "oy"]
 for c in range(C):
     class_mask = y_est == c
-    plot(X_test[class_mask, 0], X_test[class_mask, 1], styles[c], markersize=10)
-    plot(X_test[class_mask, 0], X_test[class_mask, 1], "kx", markersize=8)
-title("Synthetic data classification - KNN")
+    plt.plot(X_test[class_mask, 0], X_test[class_mask, 1], styles[c], markersize=10)
+    plt.plot(X_test[class_mask, 0], X_test[class_mask, 1], "kx", markersize=8)
+plt.title("Synthetic data classification - KNN")
 
 # Compute and plot confusion matrix
 cm = confusion_matrix(y_test, y_est)
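+# (cm[i, j] counts samples with true class i predicted as class j)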
 accuracy = 100 * cm.diagonal().sum() / cm.sum()
 error_rate = 100 - accuracy
-figure(2)
-imshow(cm, cmap="binary", interpolation="None")
-colorbar()
-xticks(range(C))
-yticks(range(C))
-xlabel("Predicted class")
-ylabel("Actual class")
-title(
+plt.figure(2)
+plt.imshow(cm, cmap="binary", interpolation="None")
+plt.colorbar()
+plt.xticks(range(C))
+plt.yticks(range(C))
+plt.xlabel("Predicted class")
+plt.ylabel("Actual class")
+plt.title(
     "Confusion matrix (Accuracy: {0}%, Error Rate: {1}%)".format(accuracy, error_rate)
 )
 
-show()
+plt.show()
 
 print("Ran Exercise 6.3.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex6_3_2.py b/exercises/02450Toolbox_Python/Scripts/ex6_3_2.py
index 2dda8a92f1187a9f45cd7e8d64a21a83fd0c12a4..ea4fc54053d07d055d5b32b1baa2517e8fbe03ba 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex6_3_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex6_3_2.py
@@ -4,7 +4,7 @@ import numpy as np
 
 # requires data from exercise 1.5.1
 from ex1_5_1 import *
-from matplotlib.pyplot import figure, plot, show, xlabel, ylabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection
 from sklearn.neighbors import KNeighborsClassifier
@@ -34,10 +34,10 @@ for train_index, test_index in CV.split(X, y):
     i += 1
 
 # Plot the classification error rate
-figure()
-plot(100 * sum(errors, 0) / N)
-xlabel("Number of neighbors")
-ylabel("Classification error rate (%)")
-show()
+plt.figure()
+plt.plot(100 * sum(errors, 0) / N)
+plt.xlabel("Number of neighbors")
+plt.ylabel("Classification error rate (%)")
+plt.show()
 
 print("Ran Exercise 6.3.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex7_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex7_1_1.py
index ff2ad8158085486b6823fe4219c8bc6f372d21db..9ee8a6f86df8459b356d51a44249aafc35bfd3fc 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex7_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex7_1_1.py
@@ -2,7 +2,7 @@ import numpy as np
 
 # requires data from exercise 1.5.1
 from ex1_5_1 import *
-from matplotlib.pyplot import figure, plot, show, xlabel, ylabel
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection
 from sklearn.neighbors import KNeighborsClassifier
diff --git a/exercises/02450Toolbox_Python/Scripts/ex7_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex7_2_1.py
index 4dd2032fddc1ea1734465ef997a53ba4e5e4ac76..2fd04cb569f20c30e85a52cf0e3ac83e71a119f5 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex7_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex7_2_1.py
@@ -5,9 +5,7 @@ import sklearn.tree
 
-# requires data from exercise 1.5.1
+# requires data from exercise 5.1.5
 from ex5_1_5 import *
-from matplotlib.pyplot import figure, plot, show, xlabel, ylabel
 from sklearn import model_selection
-from sklearn.neighbors import KNeighborsClassifier
 
 X, y = X[:, :10], X[:, 10:]
-# This script crates predictions from three KNN classifiers using cross-validation
+# This script creates predictions from three KNN classifiers using cross-validation
@@ -41,3 +39,5 @@ CI = st.t.interval(
     1 - alpha, len(z) - 1, loc=np.mean(z), scale=st.sem(z)
 )  # Confidence interval
 p = 2 * st.t.cdf(-np.abs(np.mean(z)) / st.sem(z), df=len(z) - 1)  # p-value
+
+# Note: you may want to print CI and p here!
\ No newline at end of file
diff --git a/exercises/02450Toolbox_Python/Scripts/ex8_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex8_1_1.py
index db4639dec1eb28c3485c874c408601ae8355eefd..e29fc9071f856cdf53209f24f47a0ec96245430d 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex8_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex8_1_1.py
@@ -3,18 +3,7 @@
 import importlib_resources
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pylab import (
-    figure,
-    grid,
-    legend,
-    loglog,
-    semilogx,
-    show,
-    subplot,
-    title,
-    xlabel,
-    ylabel,
-)
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn import model_selection
 
@@ -121,25 +110,26 @@ for train_index, test_index in CV.split(X, y):
 
     # Display the results for the last cross-validation fold
     if k == K - 1:
-        figure(k, figsize=(12, 8))
-        subplot(1, 2, 1)
-        semilogx(lambdas, mean_w_vs_lambda.T[:, 1:], ".-")  # Don't plot the bias term
-        xlabel("Regularization factor")
-        ylabel("Mean Coefficient Values")
-        grid()
+        plt.figure(k, figsize=(12, 8))
+        plt.subplot(1, 2, 1)
+        plt.semilogx(lambdas, mean_w_vs_lambda.T[:, 1:], ".-")  # Don't plot the bias term
+        plt.xlabel("Regularization factor")
+        plt.ylabel("Mean Coefficient Values")
+        plt.grid()
         # You can choose to display the legend, but it's omitted for a cleaner
         # plot, since there are many attributes
         # legend(attributeNames[1:], loc='best')
 
-        subplot(1, 2, 2)
-        title("Optimal lambda: 1e{0}".format(np.log10(opt_lambda)))
-        loglog(
+        plt.subplot(1, 2, 2)
+        plt.title("Optimal lambda: 1e{0}".format(np.log10(opt_lambda)))
+        plt.loglog(
             lambdas, train_err_vs_lambda.T, "b.-", lambdas, test_err_vs_lambda.T, "r.-"
         )
-        xlabel("Regularization factor")
-        ylabel("Squared error (crossvalidation)")
-        legend(["Train error", "Validation error"])
-        grid()
+        plt.xlabel("Regularization factor")
+        plt.ylabel("Squared error (crossvalidation)")
+        plt.legend(["Train error", "Validation error"])
+        plt.grid()
 
     # To inspect the used indices, use these print statements
     # print('Cross validation fold {0}/{1}:'.format(k+1,K))
@@ -148,7 +137,7 @@ for train_index, test_index in CV.split(X, y):
 
     k += 1
 
-show()
+plt.show()
 # Display results
 print("Linear regression without feature selection:")
 print("- Training error: {0}".format(Error_train.mean()))
diff --git a/exercises/02450Toolbox_Python/Scripts/ex8_3_1.py b/exercises/02450Toolbox_Python/Scripts/ex8_3_1.py
index e926d6751a9fc7b99aa786d1b217aa55a9a0e6d8..d8e8c2c8b7e72964d8970d7cfa3648e8ec25cde8 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex8_3_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex8_3_1.py
@@ -2,7 +2,7 @@
 import importlib_resources
 import numpy as np
 import torch
-from matplotlib.pyplot import figure, show, title
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 
 from dtuimldmtools import dbplotf, train_neural_net, visualize_decision_boundary
@@ -67,12 +67,12 @@ print(
 predict = lambda x: (
     torch.max(net(torch.tensor(x, dtype=torch.float)), dim=1)[1]
 ).data.numpy()
-figure(1, figsize=(9, 9))
+plt.figure(1, figsize=(9, 9))
 visualize_decision_boundary(
     predict, [X_train, X_test], [y_train, y_test], attributeNames, classNames
 )
-title("ANN decision boundaries")
+plt.title("ANN decision boundaries")
 
-show()
+plt.show()
 
 print("Ran Exercise 8.3.1")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex8_3_2.py b/exercises/02450Toolbox_Python/Scripts/ex8_3_2.py
index 958582f5e62fd5a78cc315195acd666fee79560c..6c2b80de363533a13181690a10b43fcb7f12c4d6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex8_3_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex8_3_2.py
@@ -2,7 +2,7 @@
 import importlib_resources
 import numpy as np
 import sklearn.linear_model as lm
-from matplotlib.pyplot import figure, show, title
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 
 from dtuimldmtools import dbplotf, train_neural_net, visualize_decision_boundary
@@ -24,7 +24,7 @@ classNames = [name[0][0] for name in mat_data["classNames"]]
 
 N, M = X.shape
 C = len(classNames)
-# %% Model fitting and prediction
+# Model fitting and prediction
 
 # Multinomial logistic regression
 logreg = lm.LogisticRegression(
@@ -43,12 +43,12 @@ print(
 )
 
 predict = lambda x: np.argmax(logreg.predict_proba(x), 1)
-figure(2, figsize=(9, 9))
+plt.figure(2, figsize=(9, 9))
 visualize_decision_boundary(
     predict, [X_train, X_test], [y_train, y_test], attributeNames, classNames
 )
-title("LogReg decision boundaries")
+plt.title("LogReg decision boundaries")
 
-show()
+plt.show()
 
 print("Ran Exercise 8.3.2")
diff --git a/exercises/02450Toolbox_Python/Scripts/ex8_3_3.py b/exercises/02450Toolbox_Python/Scripts/ex8_3_3.py
index 683f61bba90a3285c0dd4d052ad0b9879485093c..64131f60e16cb65d33ace5529fc802ba680c5e2f 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex8_3_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex8_3_3.py
@@ -1,7 +1,8 @@
 # exercise 8.3.3 Fit regularized multinomial regression
 import importlib_resources
-import matplotlib.pyplot as plt
+from sklearn.linear_model import LogisticRegression
 import numpy as np
+import matplotlib.pyplot as plt
 import sklearn.linear_model as lm
 from scipy.io import loadmat
 
diff --git a/exercises/02450Toolbox_Python/Scripts/ex9_1_1.py b/exercises/02450Toolbox_Python/Scripts/ex9_1_1.py
index fcad65118964c00311958383913ba49cf5d06750..3210861da52fdd999ec69f9650ed7f7ca7e323f6 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex9_1_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex9_1_1.py
@@ -1,11 +1,12 @@
 # exercise 9.1.1
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 import numpy as np
 from scipy.io import loadmat
-from dtuimldmtools import BinClassifierEnsemble, bootstrap, dbplot, dbprobplot
 from sklearn.linear_model import LogisticRegression
 
+from dtuimldmtools import BinClassifierEnsemble, bootstrap, dbplot, dbprobplot
+
 filename = importlib_resources.files("dtuimldmtools").joinpath("data/synth5.mat")
 
 # Load Matlab data file and extract variables of interest
@@ -53,9 +54,9 @@ ErrorRate = (y!=y_est_ensemble).sum(dtype=float)/N
 print('Error rate: {:3.2f}%'.format(ErrorRate*100))
 
 ce = BinClassifierEnsemble(logits)
-figure(1); dbprobplot(ce, X, y, 'auto', resolution=200)
-figure(2); dbplot(ce, X, y, 'auto', resolution=200)
+plt.figure(1); dbprobplot(ce, X, y, 'auto', resolution=200)
+plt.figure(2); dbplot(ce, X, y, 'auto', resolution=200)
 
-show()
+plt.show()
 
 print('Ran Exercise 9.1.1')
\ No newline at end of file
diff --git a/exercises/02450Toolbox_Python/Scripts/ex9_1_3.py b/exercises/02450Toolbox_Python/Scripts/ex9_1_3.py
index 296caf5fbe200202623b3c26168abc7da860f210..34b6b84ad9d6e8cf926574d6e080a704cf3d1e1e 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex9_1_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex9_1_3.py
@@ -1,8 +1,8 @@
 # exercise 9.1.3
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from dtuimldmtools import dbplot, dbprobplot
 from sklearn.ensemble import RandomForestClassifier
 
 filename = importlib_resources.files("dtuimldmtools").joinpath("data/synth7.mat")
@@ -31,9 +32,9 @@ ErrorRate = (y!=y_est).sum(dtype=float)/N
 print('Error rate: {:.2f}%'.format(ErrorRate*100))    
 
 # Plot decision boundaries    
-figure(1); dbprobplot(rf_classifier, X, y, 'auto', resolution=400)
-figure(2); dbplot(rf_classifier, X, y, 'auto', resolution=400)
+plt.figure(1); dbprobplot(rf_classifier, X, y, 'auto', resolution=400)
+plt.figure(2); dbplot(rf_classifier, X, y, 'auto', resolution=400)
 
-show()
+plt.show()
 
 print('Ran Exercise 9.1.3')
\ No newline at end of file
diff --git a/exercises/02450Toolbox_Python/Scripts/ex9_2_1.py b/exercises/02450Toolbox_Python/Scripts/ex9_2_1.py
index a582f7f20c63fa72e58d5c074706d740da195293..ca2cebbf6c651a9c0e78d6e5ff7e474a1edf8e3d 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex9_2_1.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex9_2_1.py
@@ -1,6 +1,6 @@
 # exercise 9.2.1
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.model_selection import StratifiedKFold
 from sklearn.linear_model import LogisticRegression
@@ -34,14 +34,14 @@ for train_index, test_index in CV.split(X,y):
     y_test_est = logit_classifier.predict(X_test).T
     p = logit_classifier.predict_proba(X_test)[:,1].T
 
-    figure(k)
+    plt.figure(k)
     rocplot(p, y_test)
 
-    figure(k+1)
+    plt.figure(k+1)
     confmatplot(y_test,y_test_est)
 
     k+=2
     
-show()    
+plt.show()
 
 print('Ran Exercise 9.2.1')
\ No newline at end of file
diff --git a/exercises/02450Toolbox_Python/Scripts/ex9_2_2.py b/exercises/02450Toolbox_Python/Scripts/ex9_2_2.py
index 138ce3dea61637541f17fb2b3820f898abb838ab..d3d66763b18db8237c643d2aa4c4e10f8a324fac 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex9_2_2.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex9_2_2.py
@@ -1,6 +1,6 @@
 # exercise 9.2.2
 import importlib_resources
-from matplotlib.pyplot import figure, show
+import matplotlib.pyplot as plt
 from scipy.io import loadmat
 from sklearn.model_selection import StratifiedKFold
 from sklearn.linear_model import LogisticRegression
@@ -38,12 +38,12 @@ for train_index, test_index in CV.split(X,y):
     y_test_est = logit_classifier.predict(X_test).T
     p = logit_classifier.predict_proba(X_test)[:,1].T
 
-    figure(k)
+    plt.figure(k)
     rocplot(p,y_test)
 
-    figure(k+1)
+    plt.figure(k+1)
     confmatplot(y_test,y_test_est)
 
     k+=2
     
-show()    
\ No newline at end of file
+plt.show()
\ No newline at end of file