diff --git a/Week04/QUIZ_week4_solution.ipynb b/Week04/QUIZ_week4_solution.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..e5553e21dbeff739e764b89e096d287cf7bb6a50
--- /dev/null
+++ b/Week04/QUIZ_week4_solution.ipynb
@@ -0,0 +1,137 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "608c7b2a",
+   "metadata": {},
+   "source": [
+    "# QUIZ WEEK 4 SOLUTIONS"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a4d9315d",
+   "metadata": {},
+   "source": [
+    "## Question 1, features size"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "56f027e9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "features_size = 2394000\n"
+     ]
+    }
+   ],
+   "source": [
+    "p = 5\n",
+    "h = 256\n",
+    "w = 384\n",
+    "\n",
+    "features_size = (h - p + 1) * (w - p + 1) * (p**2)\n",
+    "print(f'{features_size = }')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0649178f",
+   "metadata": {},
+   "source": [
+    "## Question 2, cluster probability"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "4329d615",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "cluster_probability = 0.2603369065849923\n"
+     ]
+    }
+   ],
+   "source": [
+    "cluster_probability = 1 - 483/653\n",
+    "print(f'{cluster_probability = }')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3179e4d3",
+   "metadata": {},
+   "source": [
+    "## Question 3, dominant segmentation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "ca50755a",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "dominant_segmentation = 3\n"
+     ]
+    }
+   ],
+   "source": [
+    "# dominant semgentation\n",
+    "import numpy as np\n",
+    "\n",
+    "P1 = np.array([[0.01, 0.11, 0.21, 0.31, 0.41],\n",
+    "    [0.04, 0.14, 0.24, 0.34, 0.44],\n",
+    "    [0.07, 0.17, 0.27, 0.37, 0.47]])\n",
+    "P2 = np.array([[0.23, 0.26, 0.29, 0.32, 0.35],\n",
+    "    [0.30, 0.33, 0.36, 0.39, 0.42],\n",
+    "    [0.37, 0.40, 0.43, 0.46, 0.49]])\n",
+    "\n",
+    "P = np.stack([P1, P2, 1-(P1+P2)])\n",
+    "S = np.argmax(P, axis=0) + 1  # segmentation into 1, 2, 3\n",
+    "counts = np.unique(S, return_counts=True)[1]  # how many times each label occurs\n",
+    "dominant_segmentation = np.argmax(counts) + 1\n",
+    "\n",
+    "print(f'{dominant_segmentation = }')\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.12"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "b35caa3764096924ce573458b7678f48285fe4447fc77b92472b406a014fb35c"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/Week04/feature_segmentatation_prob_smooth.py b/Week04/feature_segmentatation_prob_smooth.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3650aac1296a26db70ad23a43e847b40628c402
--- /dev/null
+++ b/Week04/feature_segmentatation_prob_smooth.py
@@ -0,0 +1,148 @@
+#%%
+# PREPARE
+"""
+Vedrana Andersen Dahl (vand@dtu.dk) 
+Anders Bjorholm Dahl (abda@dtu.dk)
+"""
+
+import skimage.io
+import numpy as np
+import matplotlib.pyplot as plt
+import sklearn.cluster
+import local_features as lf
+import scipy.ndimage
+
+def ind2labels(ind):
+    """ Helper function for transforming uint8 image into labeled image."""
+    return np.unique(ind, return_inverse=True)[1].reshape(ind.shape)
+
+path = '../data/week4/3labels/' # Change path to your directory
+
+#%% 
+# READ IN IMAGES
+training_image = skimage.io.imread(path + 'training_image.png')
+training_image = training_image.astype(float)
+training_labels = skimage.io.imread(path + 'training_labels.png')
+
+training_labels = ind2labels(training_labels)
+nr_labels = np.max(training_labels)+1 # number of labels in the training image
+
+fig, ax = plt.subplots(1, 2)
+ax[0].imshow(training_image, cmap=plt.cm.gray)
+ax[0].set_title('training image')
+ax[1].imshow(training_labels)
+ax[1].set_title('labels for training image')
+fig.tight_layout()
+plt.show()
+
+#%% 
+# TRAIN THE MODEL
+
+sigma = [1, 2, 3]
+features = lf.get_gauss_feat_multi(training_image, sigma)
+features = features.reshape((features.shape[0], features.shape[1]*features.shape[2]))
+labels = training_labels.ravel()
+
+nr_keep = 15000 # number of features randomly picked for clustering 
+keep_indices = np.random.permutation(np.arange(features.shape[0]))[:nr_keep]
+
+features_subset = features[keep_indices]
+labels_subset = labels[keep_indices]
+
+nr_clusters = 1000 # number of feature clusters
+# for speed, I use mini-batches
+kmeans = sklearn.cluster.MiniBatchKMeans(n_clusters=nr_clusters, 
+                                         batch_size=2*nr_clusters, 
+                                         n_init='auto')
+kmeans.fit(features_subset)
+assignment = kmeans.labels_
+
+edges = np.arange(nr_clusters + 1) - 0.5 # histogram edges halfway between integers
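+# hist[k, l] counts the training features with label l that were assigned to cluster k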
+hist = np.zeros((nr_clusters, nr_labels))
+for l in range(nr_labels):
+    hist[:, l] = np.histogram(assignment[labels_subset == l], bins=edges)[0]
+sum_hist = np.sum(hist, axis=1)
+cluster_probabilities = hist/(sum_hist.reshape(-1, 1))
+
+fig, ax = plt.subplots(2, 1)
+legend_label = [f'label {x}' for x in range(nr_labels)]
+
+ax[0].plot(hist, '.', alpha=0.5, markersize=3)
+ax[0].set_xlabel('cluster id')
+ax[0].set_ylabel('number of features in cluster')
+ax[0].legend(legend_label)
+ax[0].set_title('features in clusters per label')
+ax[1].plot(cluster_probabilities, '.', alpha=0.5, markersize=3)
+ax[1].set_xlabel('cluster id')
+ax[1].set_ylabel('label probability for cluster')
+ax[1].legend(legend_label)
+ax[1].set_title('cluster probabilities')
+fig.tight_layout()
+plt.show()
+
+# Finished training
+
+#%% 
+# USE THE MODEL
+testing_image = skimage.io.imread(path + 'testing_image.png')
+testing_image = testing_image.astype(float)
+
+features_testing = lf.get_gauss_feat_multi(testing_image, sigma)
+features_testing = features_testing.reshape((features_testing.shape[0], features_testing.shape[1]*features_testing.shape[2]))
+
+assignment_testing = kmeans.predict(features_testing)
+
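+# look up the label probabilities of each pixel's cluster, learned during training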
+probability_image = np.zeros((assignment_testing.size, nr_labels))
+for l in range(nr_labels):
+    probability_image[:, l] = cluster_probabilities[assignment_testing, l]
+probability_image = probability_image.reshape(testing_image.shape + (nr_labels, ))
+
+P_rgb = np.zeros(probability_image.shape[0:2] + (3, ))  # RGB canvas for visualization
+k = min(nr_labels, 3)  # at most three labels fit into the RGB channels
+P_rgb[:, :, :k] = probability_image[:, :, :k]
+
+fig, ax = plt.subplots(1, 2)
+ax[0].imshow(testing_image, cmap=plt.cm.gray)
+ax[0].set_title('testing image')
+ax[1].imshow(P_rgb)
+ax[1].set_title('probabilities for testing image as RGB')
+fig.tight_layout()
+plt.show()
+
+#%%
+# SMOOTH PROBABILITY MAP
+
+sigma = 3 # Gaussian smoothing parameter
+
+seg_im_max = np.argmax(P_rgb, axis=2)  # per-pixel class with the highest probability
+c = np.eye(P_rgb.shape[2])  # rows of the identity give one-hot encodings
+P_rgb_max = c[seg_im_max]  # one-hot (RGB) image of the unsmoothed segmentation
+
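+# smoothing each probability map before the argmax suppresses small isolated regions in the segmentation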
+probability_smooth = np.zeros(probability_image.shape)
+for i in range(probability_image.shape[2]):
+    probability_smooth[:, :, i] = scipy.ndimage.gaussian_filter(probability_image[:, :, i], sigma, order=0)
+seg_im_smooth = np.argmax(probability_smooth, axis=2)
+
+probability_smooth_max = c[seg_im_smooth]
+
+P_rgb_smooth = np.zeros(probability_smooth_max.shape[0:2]+(3, ))
+k = min(nr_labels, 3)
+P_rgb_smooth[:, :, :k] = probability_smooth[:, :, :k]
+P_rgb_smooth_max = np.zeros(probability_smooth_max.shape[0:2]+(3, ))
+P_rgb_smooth_max[:, :, :k] = probability_smooth_max[:, :, :k]
+
+# Display result
+fig, ax = plt.subplots(2, 4, sharex=True, sharey=True)
+ax[0][0].imshow(P_rgb[:, :, 0])
+ax[0][1].imshow(P_rgb[:, :, 1])
+ax[0][2].imshow(P_rgb[:, :, 2])
+ax[0][3].imshow(P_rgb_max)
+ax[1][0].imshow(P_rgb_smooth[:, :, 0])
+ax[1][1].imshow(P_rgb_smooth[:, :, 1])
+ax[1][2].imshow(P_rgb_smooth[:, :, 2])
+ax[1][3].imshow(P_rgb_smooth_max)
+fig.tight_layout()
+plt.show()
+
+# %%
diff --git a/Week04/local_features.py b/Week04/local_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..807ac9d003e7a60777aee50031574b89c855a3ba
--- /dev/null
+++ b/Week04/local_features.py
@@ -0,0 +1,142 @@
+"""
+Helping functions for extracting features from images
+
+Vedrana Andersen Dahl (vand@dtu.dk) 
+Anders Bjorholm Dahl (abda@dtu.dk)
+"""
+
+import numpy as np
+import scipy.ndimage
+
+def get_gauss_feat_im(im, sigma=1, normalize=True):
+    """Gauss derivative feaures for every image pixel.
+    Arguments:
+        image: a 2D image, shape (r, c).
+        sigma: standard deviation for Gaussian derivatives.
+        normalize: flag indicating normalization of features.
+    Returns:
+        imfeat: 3D array of size (r, c, 15) with a 15-dimensional feature
+            vector for every pixel in the image.
+    Author: vand@dtu.dk, 2020
+    """
+      
+    r, c = im.shape
+    imfeat = np.zeros((r, c, 15))
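+    # the 15 features are all Gaussian derivatives up to 4th order: 1 + 2 + 3 + 4 + 5 = 15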
+    imfeat[:, :, 0] = scipy.ndimage.gaussian_filter(im, sigma, order=0)
+    imfeat[:, :, 1] = scipy.ndimage.gaussian_filter(im, sigma, order=[0, 1])
+    imfeat[:, :, 2] = scipy.ndimage.gaussian_filter(im, sigma, order=[1, 0])
+    imfeat[:, :, 3] = scipy.ndimage.gaussian_filter(im, sigma, order=[0, 2])
+    imfeat[:, :, 4] = scipy.ndimage.gaussian_filter(im, sigma, order=[1, 1])
+    imfeat[:, :, 5] = scipy.ndimage.gaussian_filter(im, sigma, order=[2, 0])
+    imfeat[:, :, 6] = scipy.ndimage.gaussian_filter(im, sigma, order=[0, 3])
+    imfeat[:, :, 7] = scipy.ndimage.gaussian_filter(im, sigma, order=[1, 2])
+    imfeat[:, :, 8] = scipy.ndimage.gaussian_filter(im, sigma, order=[2, 1])
+    imfeat[:, :, 9] = scipy.ndimage.gaussian_filter(im, sigma, order=[3, 0])
+    imfeat[:, :, 10] = scipy.ndimage.gaussian_filter(im, sigma, order=[0, 4])
+    imfeat[:, :, 11] = scipy.ndimage.gaussian_filter(im, sigma, order=[1, 3])
+    imfeat[:, :, 12] = scipy.ndimage.gaussian_filter(im, sigma, order=[2, 2])
+    imfeat[:, :, 13] = scipy.ndimage.gaussian_filter(im, sigma, order=[3, 1])
+    imfeat[:, :, 14] = scipy.ndimage.gaussian_filter(im, sigma, order=[4, 0])
+
+    if normalize:
+        imfeat -= np.mean(imfeat, axis=(0, 1))
+        im_std = np.std(imfeat, axis=(0, 1))
+        im_std[im_std<10e-10] = 1
+        imfeat /= im_std
+    
+    return imfeat
+
+def get_gauss_feat_multi(im, sigma=[1, 2, 4], normalize=True):
+    '''Multi-scale Gauss derivative features for every image pixel.
+    Arguments:
+        image: a 2D image, shape (r, c).
+        sigma: list of standard deviations for Gaussian derivatives.
+        normalize: flag indicating normalization of features.
+    Returns:
+        imfeat: a 3D array of shape (r*c, n_scale, 15), where n_scale is the
+            length of sigma. Every pixel thus gets a 15-dimensional feature
+            vector per scale, i.e. 15*n_scale features in total.
+    Author: abda@dtu.dk, 2021
+    '''
+    imfeats = []
+    for s in sigma:
+        feat = get_gauss_feat_im(im, s, normalize)
+        imfeats.append(feat.reshape(-1, feat.shape[2]))
+    
+    imfeats = np.asarray(imfeats).transpose(1, 0, 2)
+    return imfeats
+
+
+def im2col(im, patch_size=[3, 3], stepsize=1):
+    """Rearrange image patches into columns
+    Arguments:
+        image: a 2D image, shape (r, c).
+        patch size: size of extracted paches.
+        stepsize: patch step size.
+    Returns:
+        patches: a 2D array which in every column has a patch associated 
+            with one image pixel. For stepsize 1, number of returned column 
+            is (r-patch_size[0]+1)*(c-patch_size[0]+1) due to bounary. The 
+            length of columns is pathc_size[0]*patch_size[1].
+    """
+    
+    r, c = im.shape
+    s0, s1 = im.strides
+    nrows = r - patch_size[0] + 1
+    ncols = c - patch_size[1] + 1
+    shp = patch_size[0], patch_size[1], nrows, ncols
+    strd = s0, s1, s0, s1
+
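+    # as_strided creates a zero-copy view containing every overlapping patch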
+    out_view = np.lib.stride_tricks.as_strided(im, shape=shp, strides=strd)
+    return out_view.reshape(patch_size[0]*patch_size[1], -1)[:, ::stepsize]
+
+
+def ndim2col(im, block_size=[3, 3], stepsize=1):
+    """Rearrange image blocks into columns for N-D image (e.g. RGB image)"""""
+    if(im.ndim == 2):
+        return im2col(im, block_size, stepsize)
+    else:
+        r, c, l = im.shape
+        patches = np.zeros((l * block_size[0] * block_size[1], 
+                            (r - block_size[0] + 1) * (c - block_size[1] + 1)))
+        for i in range(l):
+            patches[i * block_size[0] * block_size[1] : (i+1) * block_size[0] * block_size[1], 
+                    :] = im2col(im[:, :, i], block_size, stepsize)
+        return patches
+
+#%%
+import skimage.io
+import matplotlib.pyplot as plt
+
+if __name__ == '__main__':
+    
+    filename = '../../../../Data/week3/3labels/training_image.png'
+    I = skimage.io.imread(filename)
+    I = I.astype(float)
+    
+    
+    # features based on Gaussian derivatives
+    sigma = 1
+    gf = get_gauss_feat_im(I, sigma)
+    
+    fig, ax = plt.subplots(3, 5)
+    for j in range(5):
+        for i in range(3):
+            ax[i][j].imshow(gf[:, :, 5 * i + j], cmap='jet')
+            print(np.std(gf[:, :, 5 * i + j]))
+            ax[i][j].set_title(f'layer {5 * i + j}')
+            
+            
+    # features based on image patches
+    I = I[300:320, 400:420]  # use a small crop so differences between layers are visible
+    pf = im2col(I, [3, 3])
+    pf = pf.reshape((9, I.shape[0] - 2, I.shape[1] - 2))
+            
+    fig, ax = plt.subplots(3, 3)
+    for j in range(3):
+        for i in range(3):
+            ax[i][j].imshow(pf[3 * i + j], cmap='jet')
+            ax[i][j].set_title(f'layer {3 * i + j}')
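+
+    # quick check of the multi-scale features on the cropped image: with two
+    # sigmas the returned array has shape (r*c, 2, 15), cf. get_gauss_feat_multi
+    mf = get_gauss_feat_multi(I, [1, 2])
+    print(mf.shape)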
+   
+