diff --git a/README.md b/README.md
index ed0b271fc609ef263f9e6e12604cbe184449a96f..3502260126e1eacab11d668fff69333949b198db 100644
--- a/README.md
+++ b/README.md
@@ -18,9 +18,9 @@ as the [official repo](https://github.com/rcdaudt/fully_convolutional_change_det
   scipy==1.3.1  
   tqdm==4.35.0  
 
-Tested on Python 3.7.4, Ubuntu 16.04
+Tested on Python 3.7.4, Ubuntu 16.04 and Python 3.6.8, Windows 10.
 
-# Basic Usage
+# Basic usage
 
 ```bash
 # The network definition scripts are from the original repo
@@ -30,7 +30,7 @@ mkdir exp
 cd src
 ```
 
-In `src/constants.py`, change the dataset directories to your own. In `config_base.yaml`, feel free to modify the configurations.
+In `src/constants.py`, change the dataset directories to your own. In `config_base.yaml`, feel free to change the configurations.
 
 For training, try
 
@@ -41,12 +41,73 @@ python train.py train --exp-config ../config_base.yaml
 For evaluation, try
 
 ```bash
-python train.py val --exp-config ../config_base.yaml --resume path_to_checkpoint
+python train.py val --exp-config ../config_base.yaml --resume path_to_checkpoint --save-on
 ```
 
-You can find the checkpoints in `exp/base/weights/`, the log files in `exp/base/logs`, and the output change maps in `exp/outs`.
+You can find the checkpoints in `exp/base/weights/`, the log files in `exp/base/logs`, and the output change maps in `exp/base/outs`.
+
+# Train on Air Change dataset and OSCD dataset
+
+To carry out a full training on these two datasets and with all three architectures, run the `train9.sh` script under the root folder of this repo.
+```bash
+. ./train9.sh
+```
+
+And check the results in different subdirectories of `./exp/`. 
+
+# Create your own configuration file
+
+During scientific research, it is common to run many experiments with different settings, which is why we need configuration files to better manage those settings. In this repo, you can create a `yaml` file under the naming convention below:
+
+`config_TAG{_SUFFIX}.yaml`
+
+Those in the curly braces can be omitted. `TAG` usually stands for an experiment group. For example, a set of experiments for an architecture, a dataset, etc. It will be the name of the subdirectory that holds all the checkpoints, log files, and output images. `SUFFIX` can be used to distinguish different experiments in an experiment group. If it is specified, the generated files of this experiment will be tagged with `SUFFIX` in their file names. In plain English, `TAG1` and `TAG2` have major differences, while `SUFFIX1` and `SUFFIX2` of the same `TAG` share most of the configurations. By combining `TAG` and `SUFFIX`, it is convenient to achieve both coarse-grained and fine-grained control of experimental configurations.
+
+Here is an example to help you understand. Suppose I'm going to finish my experiments on two datasets, OSCD and Lebedev, and I'm not sure which batch size achieves the best performance. So I create these 5 config files.
+```
+config_OSCD_bs4.yaml
+config_OSCD_bs8.yaml
+config_OSCD_bs16.yaml
+config_Lebedev_bs16.yaml
+config_Lebedev_bs32.yaml
+```
+
+After training, I get my `exp/` folder like this:
+
+```
+-exp/
+--OSCD/
+---weights/
+----model_best_bs4.pth
+----model_best_bs8.pth
+----model_best_bs16.pth
+---outs/
+---logs/
+---config_OSCD_bs4.yaml
+---config_OSCD_bs8.yaml
+---config_OSCD_bs16.yaml
+--Lebedev/
+---weights/
+----model_best_bs16.pth
+----model_best_bs32.pth
+---outs/
+---logs/
+---config_Lebedev_bs16.yaml
+---config_Lebedev_bs32.yaml
+```
+
+Now the experiment results are organized in a more structured way, and I think it would be a little bit easier to collect the statistics. Also, since the historical experiments are arranged in a neat order, you will quickly recall what you did when you come back to these results, even after a long time.
+
+Alternatively, you can configure from the command line. This can be useful when there is only a minor change between two runs, because the configuration items from the command line are set to override those from the `yaml` file. That is, the final value of each configuration item is evaluated and applied in the following order:
+
+```
+default_value -> value_from_config_file -> value_from_command_line
+```
+
+At least one of the above three values should be given. In this way, you don't have to include all of the config items in the `yaml` file or in the command-line input. You can use either of them, or combine them. Make your choice according to preference and circumstances.
 
 ---
 # Changed
 
-2020.3.14 Add the configuration files of my experiments. 
\ No newline at end of file
+- 2020.3.14 Add the configuration files of my experiments. 
+- 2020.4.14 Detail README.md.
diff --git a/src/data/_AirChange.py b/src/data/_AirChange.py
index 89d257e6a47c3cbf6e14cc80ce3428ed23f5e6da..00e17a1d13504271746f5591378388b2bfd8fb0e 100644
--- a/src/data/_AirChange.py
+++ b/src/data/_AirChange.py
@@ -59,6 +59,9 @@ class _AirChangeDataset(CDDataset):
         label = (label / 255.0).astype(np.uint8)    # To 0,1
         return label if self.phase == 'train' else self.cropper(label)
 
+    def get_name(self, index):
+        return '{loc}-{id}-cm.bmp'.format(loc=self.LOCATION, id=index)
+
     @staticmethod
     def _bmp_loader(bmp_path_wo_ext):
         # Case insensitive .bmp loader
diff --git a/src/data/__init__.py b/src/data/__init__.py
index 14a3da5dbb783326e19999c8198160bdfd6fe4db..1f81254569e7cce132a383614235f34fae5aac6c 100644
--- a/src/data/__init__.py
+++ b/src/data/__init__.py
@@ -1,4 +1,4 @@
-from os.path import join, expanduser, basename, exists
+from os.path import join, expanduser, basename, exists, splitext
 
 import torch
 import torch.utils.data as data
@@ -41,7 +41,7 @@ class CDDataset(data.Dataset):
         if self.phase == 'train':
             return t1, t2, label
         else:
-            return basename(self.label_list[index]), t1, t2, label
+            return self.get_name(index), t1, t2, label
 
     def _read_file_paths(self):
         raise NotImplementedError
@@ -52,6 +52,9 @@ class CDDataset(data.Dataset):
     def fetch_image(self, image_path):
         return default_loader(image_path)
 
+    def get_name(self, index):
+        return splitext(basename(self.label_list[index]))[0]+'.bmp'
+
     def preprocess(self, t1, t2, label):
         if self.transforms[0] is not None:
             # Applied on all
diff --git a/src/utils/metrics.py b/src/utils/metrics.py
index 3a13775ebecab0fdc1e44d705ff32971eddfb98c..9c71f97bdf54c39228e0013e107d721b7f1a9a6b 100644
--- a/src/utils/metrics.py
+++ b/src/utils/metrics.py
@@ -41,7 +41,7 @@ class AverageMeter:
 # These metrics only for numpy arrays
 class Metric(AverageMeter):
     __name__ = 'Metric'
-    def __init__(self, n_classes=2, mode='accum', reduction='binary'):
+    def __init__(self, n_classes=2, mode='separ', reduction='binary'):
         super().__init__(None)
         self._cm = AverageMeter(partial(metrics.confusion_matrix, labels=np.arange(n_classes)))
         assert mode in ('accum', 'separ')
@@ -66,9 +66,12 @@ class Metric(AverageMeter):
             return self._compute(cm)[1]
 
     def update(self, pred, true, n=1):
-        # Note that this is no thread-safe
         self._cm.update(true.ravel(), pred.ravel())
         if self.mode == 'accum':
+            # Note that accumulation mode is special in that metric.val saves historical information.
+            # Therefore, metric.avg IS USUALLY NOT THE "AVERAGE" VALUE YOU WANT!!! 
+            # Instead, metric.val is the averaged result in the sense of metric.avg in separ mode, 
+            # while metric.avg can be considered as some average of average.
             cm = self._cm.sum
         elif self.mode == 'separ':
             cm = self._cm.val
@@ -94,7 +97,7 @@ class Recall(Metric):
 
 class Accuracy(Metric):
     __name__ = 'OA'
-    def __init__(self, n_classes=2, mode='accum'):
+    def __init__(self, n_classes=2, mode='separ'):
         super().__init__(n_classes=n_classes, mode=mode, reduction='none')
     def _compute(self, cm):
         return np.nan_to_num(np.diag(cm).sum()/cm.sum())