diff --git a/README.md b/README.md
index 34a612b07c1cd38aeb5becf0e7da3dbf88426b15..4aa06236f41005bfab1417ce3b8afce43e7bf7ea 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@ Unitgrade is an automatic report and exam evaluation framework that enables inst
     - Instructors can automatically verify the students' solutions using a Docker VM and run hidden tests
  - Automatic Moss anti-plagiarism detection
  - CMU Autolab integration (Experimental)
+ - A live dashboard which shows the outcome of the tests
 
 ### Install
 Simply use `pip`
@@ -30,6 +31,7 @@ The figure shows an overview of the workflow.
  - You write exercises and a suite of unittests. 
  - They are then compiled to a version of the exercises without solutions. 
  - The students solve the exercises using the tests and when they are happy, they run an automatically generated `_grade.py`-script to produce a `.token`-file with the number of points they obtain. This file is then uploaded for further verification/evaluation.
+ - The students can see their progress and review hints using the dashboard (see below).
 
 ### Videos
 Videos where I try to talk and code my way through the examples can be found on youtube:
@@ -64,7 +66,7 @@ instructor/cs101/deploy.py   # A private file to deploy the tests
 ### The homework
 The homework is just any old python code you would give to the students. For instance:
 ```python
-# example_simplest/instructor/cs101/homework1.py
+# autolab_example_py_upload/instructor/cs102_autolab/homework1.py
 def reverse_list(mylist): #!f 
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
@@ -75,10 +77,9 @@ def reverse_list(mylist): #!f
 def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
     return a+b
 
 if __name__ == "__main__":
     # Example usage:
     print(f"Your result of 2 + 2 = {add(2,2)}")
     print(f"Reversing a small list", reverse_list([2,3,5,7])) 
 ```
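+
+The `#!f`-tag marks functions whose body is stripped when the exercises are compiled to the student version (see the deployment step below). As a minimal sketch of what the students receive -- the exact placeholder text is generated by the deployment tool and may differ -- the `reverse_list` function above would end up looking roughly like this:
+```python
+# Student version of homework1.py (illustrative)
+def reverse_list(mylist): 
+    """
+    Given a list 'mylist' returns a list consisting of the same elements in reverse order.
+    """
+    # TODO: Write the body of this function.
+    raise NotImplementedError("Insert your solution and remove this error.")
+```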
@@ -119,7 +120,12 @@ class Report1(Report):
     pack_imports = [cs101]     # Include all .py files in this folder
 
 if __name__ == "__main__":
     evaluate_report_student(Report1()) 
 ```
 
 ### Deployment
@@ -168,6 +174,32 @@ This runs an identical set of tests and produces the file `Report1_handin_10_of_
  - You can easily use the framework to include output of functions. 
  - See below for how to validate the students' results 
 
+
+### Viewing the results using the dashboard
+I recommend monitoring and running the tests from the IDE, as this allows you to use the debugger in conjunction with your tests. 
+However, unitgrade comes with a dashboard that allows students to see the outcome of individual tests 
+and what is currently recorded in the `.token`-file. To start the dashboard, they should simply run the command
+```
+unitgrade
+```
+from a directory that contains a test (the directory will be searched recursively for test files). 
+The command will start a small background service and open a webpage:
+
+![The dashboard](https://gitlab.compute.dtu.dk/tuhe/unitgrade/-/raw/master/docs/dashboard.png)
+
+Features supported in the current version:
+ - Shows which files need to be edited to solve the problem
+ - Collects hints given in the homework files and displays them for the relevant tests (see the sketch below)
+ - Fully responsive -- the UI, including the terminal output, updates while a test is running, regardless of where the test is launched
+ - Allows students to re-run tests from the UI
+ - Shows the current test status and the results captured in the `.token`-file
+ - Tested on Windows/Linux 
+ - The frontend is pure JavaScript and the backend only depends on Python packages
+
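+Hints are collected from the homework files; the assumed convention is that a `Hints:`-block is written in the docstring of the relevant function, and the dashboard then shows it next to the tests that exercise that function. A minimal sketch (the hint text itself is illustrative):
+```python
+# homework1.py (illustrative)
+def reverse_list(mylist): #!f
+    """ Given a list 'mylist' returns a list consisting of the same elements in reverse order.
+
+    Hints:
+        * Lists can be indexed from the back, e.g. mylist[-1] is the last element.
+    """
+    return list(reversed(mylist))
+```
+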
+The frontend is automatically enabled the moment your test classes inherit from the `UTestCase`-class; no configuration files are required, and there are no known bugs. 
+
+Note that the frontend is currently not provided in the PyPI `unitgrade` package, but only through the GitLab repository (install using `git clone` followed by `pip install -e ./`) -- it seems ready, but I want to test it on macOS and a few more systems before publishing it. 
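+
+The dashboard also shows what is recorded in the `.token`-file, and the same information can be inspected programmatically. A minimal sketch, assuming the `load_token` helper (the function the dashboard uses to read token files) is importable from `unitgrade.utils` -- the exact import path may differ:
+```python
+from unitgrade.utils import load_token  # assumed import path
+
+# 'Report1_handin_10_of_10.token' is a placeholder; use the token file produced by the grade script.
+results, plain_text = load_token("Report1_handin_10_of_10.token")
+
+print(plain_text)       # The human-readable summary stored in the token file.
+print(results.keys())   # The dictionary of recorded test outcomes.
+```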
+
 ## How safe is Unitgrade?
 There are three principal ways of cheating:
  - Break the framework and submit a `.token` file that 'lies' about the true number of points
@@ -197,13 +229,19 @@ One of the main advantages of `unitgrade` over web-based autograders it that tes
 # example_framework/instructor/cs102/report2.py
 from unitgrade import UTestCase, cache  
 
 class Week1(UTestCase):
     def test_add(self):
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
 
     def test_reverse(self):
         self.assertEqualC(reverse_list([1, 2, 3])) 
 ```
 Note we have changed the test-function to `self.assertEqualC` (the `C` is for cache) and dropped the expected result. What `unitgrade` will do
 is to evaluate the test *on the working version of the code*, compute the results of the test, 
@@ -213,21 +251,21 @@ is to evaluate the test *on the working version of the code*, compute the result
 Titles can be set either using python docstrings or programmatically. An example:
 ```python 
 # example_framework/instructor/cs102/report2.py
 class Week1Titles(UTestCase): 
     """ The same problem as before with nicer titles """
     def test_add(self):
         """ Test the addition method add(a,b) """
         self.assertEqualC(add(2,2))
         print("output generated by test")
         self.assertEqualC(add(-100, 5))
         # self.assertEqual(2,3, msg="This test automatically fails.")
 
     def test_reverse(self):
         ls = [1, 2, 3]
         reverse = reverse_list(ls)
         self.assertEqualC(reverse)
         # Although the title is set after the test potentially fails, it will *always* show correctly for the student.
         self.title = f"Checking if reverse_list({ls}) = {reverse}"  # Programmatically set the title 
 ```
 When this is run, the titles are shown as follows:
 ```terminal
@@ -236,7 +274,7 @@ When this is run, the titles are shown as follows:
 | | | |_ __  _| |_| |  \/_ __ __ _  __| | ___ 
 | | | | '_ \| | __| | __| '__/ _` |/ _` |/ _ \
 | |_| | | | | | |_| |_\ \ | | (_| | (_| |  __/
- \___/|_| |_|_|\__|\____/_|  \__,_|\__,_|\___| v0.1.17, started: 19/05/2022 15:14:09
+ \___/|_| |_|_|\__|\____/_|  \__,_|\__,_|\___| v0.1.22, started: 15/06/2022 09:18:15
 
 CS 102 Report 2 
 Question 1: Week1                                                                                                       
@@ -250,9 +288,10 @@ Question 2: The same problem as before with nicer titles
  * q2.2) Checking if reverse_list([1, 2, 3]) = [3, 2, 1]............................................................PASS
  * q2)   Total...................................................................................................... 6/6
  
-Total points at 15:14:09 (0 minutes, 0 seconds)....................................................................16/16
+Total points at 09:18:16 (0 minutes, 0 seconds)....................................................................16/16
 
 Including files in upload...
  * cs102
 > Testing token file integrity...
 Done!
@@ -267,21 +306,21 @@ What happens behind the scenes when we set `self.title` is that the result is pr
 The `@cache`-decorator offers a direct way to compute the correct result on the instructor's computer and submit it to the student. For instance:
 ```python
 # example_framework/instructor/cs102/report2.py
 class Question2(UTestCase): 
     @cache
     def my_reversal(self, ls):
         # The '@cache' decorator ensures the function is not run on the *student's* computer.
         # Instead the code is run on the teacher's computer and the result is passed on with the
         # other pre-computed results -- i.e. this function will run regardless of how the student happens to have
         # implemented reverse_list.
         return reverse_list(ls)
 
     def test_reverse_tricky(self):
         ls = (2,4,8)
         ls2 = self.my_reversal(tuple(ls))                   # This will always produce the right result, [8, 4, 2]
         print("The correct answer is supposed to be", ls2)  # Show students the correct answer
         self.assertEqualC(reverse_list(ls))                 # This will actually test the student's code.
         return "Buy world!"                                 # This value will be stored in the .token file  
 ```
 The `@cache` decorator will make sure the output of the function is pre-computed when the test is set up, and that the function will 
 simply return the correct result regardless of the function body. This is very helpful in a few situations:
@@ -503,26 +542,30 @@ The code for the example can be found in `examples/autolab_example`. It consists
 
 Concretely, the following code will download and build the image (note: this code must be run on the same machine on which you have installed Autolab).
 ```python
-# autolab_token_upload/deploy_autolab.py
+# autolab_example_py_upload/instructor/cs102_autolab/deploy_autolab.py
     # Step 1: Download and compile docker grading image. You only need to do this once.  
-    download_docker_images("./docker") # Download docker images from gitlab (only do this once.
-    dockerfile = f"./docker/docker_tango_python/Dockerfile"
-    autograde_image = 'tango_python_tue'
-    compile_docker_image(Dockerfile=dockerfile, tag=autograde_image)  # Compile docker image. 
+    download_docker_images("../docker") # Download docker images from gitlab (only do this once).
+    dockerfile = f"../docker/docker_tango_python/Dockerfile"
+    autograde_image = 'tango_python_tue2'  # Tag given to the image in case you have multiple images.
+    compile_docker_image(Dockerfile=dockerfile, tag=autograde_image, no_cache=False)  # Compile docker image. 
 ```
 Next, simply call the framework to compile any `_grade.py`-file into an Autolab-compatible `.tar` file that can be imported from the web interface. The script requires you to specify 
 both the instructor directory and the directory with the files the students have been handed (i.e., the same file-system format we have seen earlier). 
 ```python
-# autolab_token_upload/deploy_autolab.py
+# autolab_example_py_upload/instructor/cs102_autolab/deploy_autolab.py
     # Step 2: Create the cs102.tar file from the grade scripts. 
-    instructor_base = f"../example_framework/instructor"
-    student_base = f"../example_framework/students"
-    output_tar = deploy_assignment("cs102",  # Autolab name of assignment (and name of .tar file)
+    instructor_base = f"."
+    student_base = f"../../students/cs102_autolab"
+
+    from report2_test import Report2
+    output_tar = new_deploy_assignment("cs102",  # Autolab name of assignment (and name of .tar file)
                                    INSTRUCTOR_BASE=instructor_base,
-                                   INSTRUCTOR_GRADE_FILE=f"{instructor_base}/cs102/report2_grade.py",
+                                   INSTRUCTOR_GRADE_FILE=f"{instructor_base}/report2_test_grade.py",
                                    STUDENT_BASE=student_base,
-                                   STUDENT_GRADE_FILE=f"{student_base}/cs102/report2_grade.py",
-                                   autograde_image_tag=autograde_image) 
+                                   STUDENT_GRADE_FILE=f"{instructor_base}/report2_test.py",
+                                   autograde_image_tag=autograde_image,
+                                   homework_file="homework1.py") 
 ```
 This will produce a file `cs102.tar`. Whereas you needed to build the Docker image on the machine where you are running Autolab, you can build the lab assignments on any computer.
 ### Step 3: Upload the `.tar` lab-assignment file 
@@ -548,9 +591,9 @@ and TAs can choose to annotate the students code directly in Autolab -- we are h
 # Citing
 ```bibtex
 @online{unitgrade_devel,
-	title={Unitgrade-devel (0.1.39): \texttt{pip install unitgrade-devel}},
+	title={Unitgrade-devel (0.1.42): \texttt{pip install unitgrade-devel}},
 	url={https://lab.compute.dtu.dk/tuhe/unitgrade_private},
-	urldate = {2022-06-15}, 
+	urldate = {2022-09-16}, 
 	month={9},
 	publisher={Technical University of Denmark (DTU)},
 	author={Tue Herlau},
diff --git a/devel/example_devel/instructor/cache.db b/devel/example_devel/instructor/cache.db
index eba28aab5e607cfee36521a00079738cc07361f5..e9ae36147afd797558b6b77f20a38a305540b7b1 100644
Binary files a/devel/example_devel/instructor/cache.db and b/devel/example_devel/instructor/cache.db differ
diff --git a/devel/example_devel/instructor/cache.db-wal b/devel/example_devel/instructor/cache.db-wal
deleted file mode 100644
index 34e446fccbfd0910c31bfcdac9d965261a56f7d5..0000000000000000000000000000000000000000
Binary files a/devel/example_devel/instructor/cache.db-wal and /dev/null differ
diff --git a/devel/example_devel/instructor/cs108/report_devel_grade.py b/devel/example_devel/instructor/cs108/report_devel_grade.py
index 3237fe6f968311c7323e3ae4a036739d7c1391b2..3a545e1d1275504b96a71bddad66ac6e2a03ea46 100644
--- a/devel/example_devel/instructor/cs108/report_devel_grade.py
+++ b/devel/example_devel/instructor/cs108/report_devel_grade.py
@@ -488,8 +488,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = '# from unitgrade import hide\n# from unitgrade import utils\n# import os\n# import lzma\n# import pickle\n\n# DONT\'t import stuff here since install script requires __version__\n\n# def cache_write(object, file_name, verbose=True):\n#     # raise Exception("bad")\n#     # import compress_pickle\n#     dn = os.path.dirname(file_name)\n#     if not os.path.exists(dn):\n#         os.mkdir(dn)\n#     if verbose: print("Writing cache...", file_name)\n#     with lzma.open(file_name, \'wb\', ) as f:\n#         pickle.dump(object, f)\n#     if verbose: print("Done!")\n#\n#\n# def cache_exists(file_name):\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     return os.path.exists(file_name)\n#\n#\n# def cache_read(file_name):\n#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     if os.path.exists(file_name):\n#         try:\n#             with lzma.open(file_name, \'rb\') as f:\n#                 return pickle.load(f)\n#         except Exception as e:\n#             print("Tried to load a bad pickle file at", file_name)\n#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n#             print(e)\n#             # return pickle.load(f)\n#     else:\n#         return None\n\n\n\nimport re\nimport sys\nimport threading\nimport time\nimport lzma\nimport hashlib\nimport pickle\nimport base64\nfrom collections import namedtuple\nfrom io import StringIO\nimport numpy as np\nimport tqdm\nfrom colorama import Fore\nfrom functools import _make_key\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\n\ndef gprint(s):\n    print(f"{Fore.LIGHTGREEN_EX}{s}")\n\n\nmyround = lambda x: np.round(x)  # required for obfuscation.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\n"""\nClean up the various output-related helper classes.\n"""\nclass Logger(object):\n    def __init__(self, buffer, write_to_stdout=True):\n        # assert False\n        self.terminal = sys.stdout\n        self.write_to_stdout = write_to_stdout\n        self.log = buffer\n\n    def write(self, message):\n        if self.write_to_stdout:\n            self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\n\nclass Capturing(list):\n    def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n        self._stdout = stdout\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True):  # don\'t put arguments here.\n        self._stdout = sys.stdout if self._stdout == None else self._stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO()  # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n    def __exit__(self, *args):\n        lines = 
self._stringio.getvalue().splitlines()\n        txt = "\\n".join(lines)\n        numbers = extract_numbers(rm_progress_bar(txt))\n        self.extend(lines)\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n        self.output = txt\n        self.numbers = numbers\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct + 1)\n            if i > 0 and l.find("|", i + 1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None, mute_stdout=False):\n        if file == None:\n            file = sys.stdout\n        self.file = file\n        self.mute_stdout = mute_stdout\n        self._running = False\n        self.title = title\n        self.dt = 0.025\n        self.n = max(1, int(np.round(t / self.dt)))\n        self.show_progress_bar = show_progress_bar\n        self.pbar = None\n\n        if start:\n            self.start()\n\n    def start(self):\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            self._stdout = sys.stdout\n            sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            print("Stopping a progress bar which is not running (class unitgrade.utils.ActiveProgress")\n            pass\n            # raise Exception("Stopping a stopped progress bar. 
")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        self.file.flush()\n\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            sys.stdout = self._stdout #= sys.stdout\n\n            # sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n        t_ = time.time()\n        for _ in range(self.n - 1):  # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n            tc = time.time()\n            tic = max(0, self.dt - (tc - t_))\n            if tic > 0:\n                time.sleep(tic)\n            t_ = time.time()\n            self.pbar.update(1)\n\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n    if file == None:\n        file = sys.stdout\n    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n    print(first + dot_parts, end="", file=file)\n    last += extra\n    print(last, file=file)\n\n\ndef hide(func):\n    return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    return newDecorator\n\n\nhide = makeRegisteringDecorator(hide)\n\n\ndef extract_numbers(txt):\n    numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))\n    return all\n\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n        # print(self._cache.keys())\n        # for k in self._cache:\n        #     print(k)\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n            # This appears to be required since there are two caches. 
Otherwise, when deploy method is run twice,\n            # the cache will not be set correctly.\n            self._cache_put(key, value)\n        return value\n\n    return wrapper\n\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n""" Methods responsible for turning a dictionary into a string that can be pickled or put into a json file. """\ndef dict2picklestring(dd):\n    """\n    Turns a dictionary into a string with some compression.\n\n    :param dd:\n    :return:\n    """\n    b = lzma.compress(pickle.dumps(dd))\n    b_hash = hashlib.blake2b(b).hexdigest()\n    return base64.b64encode(b).decode("utf-8"), b_hash\n\ndef picklestring2dict(picklestr):\n    """ Reverse of the above method: Turns the string back into a dictionary. """\n    b = base64.b64decode(picklestr)\n    hash = hashlib.blake2b(b).hexdigest()\n    dictionary = pickle.loads(lzma.decompress(b))\n    return dictionary, hash\n\ntoken_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70\ndef load_token(file_in):\n    """ We put this one here to allow loading of token files for the dashboard. """\n    with open(file_in, \'r\') as f:\n        s = f.read()\n    splt = s.split(token_sep)\n    data = splt[-1]\n    info = splt[-2]\n    head = token_sep.join(splt[:-2])\n    plain_text=head.strip()\n    hash, l1 = info.split(" ")\n    data = "".join( data.strip()[1:-1].splitlines() )\n    l1 = int(l1)\n    dictionary, b_hash = picklestring2dict(data)\n    assert len(data) == l1\n    assert b_hash == hash.strip()\n    return dictionary, plain_text\n\n\n\n## Key/value store related.\n\n\nimport io\nimport sys\nimport time\nimport unittest\nfrom unittest.runner import _WritelnDecorator\nimport numpy as np\n\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    number = -1  # HAcky way to set question number.\n    show_progress_bar = True\n    unmute = False # Whether to redirect stdout.\n    cc = None\n    setUpClass_time = 3 # Estimated time to run setUpClass in TestCase. Must be set externally. See key (("ClassName", "setUpClass"), "time") in _cache.\n\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # TODO: Fix here. 
probably also needs to flush stdout.\n        self.printErrorList(\'ERROR\', [(test, res[\'stderr\']) for test, res in self.errors])\n        self.printErrorList(\'FAIL\',  [(test, res[\'stderr\']) for test, res in self.failures])\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addError(test, err)\n        err = self.errors[-1][1]\n        if hasattr(sys.stdout, \'log\'):\n            stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        else:\n            stdout = ""\n        self.errors[-1] = (self.errors[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n\n        if not hasattr(self, \'item_title_print\'):\n            # In case setUpClass() fails with an error the short description may not be set. This will fix that problem.\n            self.item_title_print = test.shortDescription()\n            if self.item_title_print is None:  # In case the short description is not set either...\n                self.item_title_print = test.id()\n\n\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        err = self.failures[-1][1]\n        stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        self.failures[-1] = (self.failures[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n        self.cc_terminate(success=False)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        msg = None\n        stdout = sys.stdout.log.readlines() # Only works because we set sys.stdout to a unitgrade.Logger\n\n        if hasattr(test, \'_get_outcome\'):\n            o = test._get_outcome()\n            if isinstance(o, dict):\n                key = (test.cache_id(), "return")\n                if key in o:\n                    msg = test._get_outcome()[key]\n\n        # print(sys.stdout.readlines())\n        self.successes.append((test, None))  # (test, message) (to be consistent with failures and errors).\n        self.successes[-1] = (self.successes[-1][0], {\'return\': msg,\n                                 \'stdout\': stdout,\n                                 \'stderr\': None})\n\n        self.cc_terminate()\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            self.cc.file.flush()\n            ss = self.item_title_print\n\n            state = "PASS" if success else "FAILED"\n\n            dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n            if self.show_progress_bar or True:\n                print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n            else:\n                print(dot_parts, end="", file=self.cc.file)\n\n            if tsecs >= 0.5:\n                state += " (" + str(tsecs) + " seconds)"\n            print(state, file=self.cc.file)\n\n    def startTest(self, test):\n        name = test.__class__.__name__\n        if self.testsRun == 0 and hasattr(test.__class__, \'_cache2\'): # Disable this if the class is pure unittest.TestCase\n            # This is the first time we are running a test. i.e. 
we can time the time taken to call setupClass.\n            if test.__class__._cache2 is None:\n                test.__class__._cache2 = {}\n            test.__class__._cache2[((name, \'setUpClass\'), \'time\')] = time.time() - self.t_start\n\n        self.testsRun += 1\n        item_title = test.shortDescription()  # Better for printing (get from cache).\n\n        if item_title == None:\n            # For unittest framework where getDescription may return None.\n            item_title = self.getDescription(test)\n        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n        # if self.show_progress_bar or True:\n        estimated_time = test.__class__._cache.get(((name, test._testMethodName), \'time\'), 100) if hasattr(test.__class__, \'_cache\') else 4\n        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        # else:\n        #     print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n        self._test = test\n        # if not self.unmute:\n        self._stdout = sys.stdout # Redundant. remove later.\n        sys.stdout = Logger(io.StringIO(), write_to_stdout=self.unmute)\n\n    def stopTest(self, test):\n        # if not self.unmute:\n        buff = sys.stdout.log\n        sys.stdout = self._stdout # redundant.\n        buff.close()\n        super().stopTest(test)\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            self.t_start = time.time()\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.framework.py>"\n\n            cc = ActiveProgress(t=self.setUpClass_time, title=q_title_print, show_progress_bar=self.show_progress_bar, mute_stdout=not self.unmute)\n            self.cc = cc\n\n\n    def _restoreStdout(self):  # Used when setting up the test.\n        if self._previousTestClass is None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            if self.show_progress_bar:\n                print(self.cc.title, end="")\n            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        stream = io.StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\nimport importnb\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport inspect\nimport colorama\nimport unittest\nimport time\nimport textwrap\nimport urllib.parse\nimport requests\nimport ast\nimport numpy\nfrom diskcache import Cache\n\ncolorama.init(autoreset=True)  # auto resets your settings after every output\nnumpy.seterr(all=\'raise\')\n\n\ndef setup_dir_by_class(C, base_dir):\n    name = C.__class__.__name__\n    return base_dir, name\n\n\nclass DKPupDB:\n    def __init__(self, artifact_file, use_pupdb=True):\n        # Make a double-headed disk cache thingy.\n        self.dk = Cache(os.path.dirname(artifact_file)) # Start in this directory.\n        self.name_ = os.path.basename(artifact_file[:-5])\n     
   if self.name_ not in self.dk:\n            self.dk[self.name_] = dict()\n        self.use_pupdb = use_pupdb\n        if self.use_pupdb:\n            from pupdb.core import PupDB\n            self.db_ = PupDB(artifact_file)\n\n    def __setitem__(self, key, value):\n        if self.use_pupdb:\n            self.db_.set(key, value)\n        with self.dk.transact():\n            d = self.dk[self.name_]\n            d[key] = value\n            self.dk[self.name_] = d\n            self.dk[self.name_ + "-updated"] = True\n\n    def __getitem__(self, item):\n        v = self.dk[self.name_][item]\n        if self.use_pupdb:\n            v2 = self.db_.get(item)\n            if v != v2:\n                print("Mismatch v1, v2 for ", item)\n        return v\n\n    def keys(self): # This one is also deprecated.\n        return tuple(self.dk[self.name_].keys()) #.iterkeys())\n        # return self.db_.keys()\n\n    def set(self, item, value): # This one is deprecated.\n        self[item] = value\n\n    def get(self, item, default=None):\n        return self[item] if item in self else default\n\n    def __contains__(self, item):\n        return item in self.dk[self.name_] #keys()\n        # return item in self.dk\n\n\n_DASHBOARD_COMPLETED_MESSAGE = "Dashboard> Evaluation completed."\n\n# Consolidate this code.\nclass classmethod_dashboard(classmethod):\n    def __init__(self, f):\n        def dashboard_wrap(cls: UTestCase):\n            if not cls._generate_artifacts:\n                f(cls)\n                return\n\n            db = DKPupDB(cls._artifact_file_for_setUpClass())\n            r = np.random.randint(1000 * 1000)\n            db.set(\'run_id\', r)\n            db.set(\'coverage_files_changed\', None)\n\n            state_ = \'fail\'\n            try:\n                _stdout = sys.stdout\n                _stderr = sys.stderr\n                std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n                # Run this unittest and record all of the output.\n                # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n                # sys.stdout = stdout_capture\n                sys.stderr = std_capture.dummy_stderr\n                sys.stdout = std_capture.dummy_stdout\n                db.set("state", "running")\n                f(cls)\n                state_ = \'pass\'\n            except Exception as e:\n                from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n                state_ = \'fail\'\n                db.set(\'state\', state_)\n                exi = e\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                raise e\n            finally:\n                db.set(\'state\', state_)\n                std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n                sys.stdout = _stdout\n                sys.stderr = _stderr\n                std_capture.close()\n        super().__init__(dashboard_wrap)\n\nclass Report:\n    title = "report title"\n    abbreviate_questions = False # Should the test items start with \'Question ...\' or just be q1).\n    version = None # A version number of the report (1.0). 
Used to compare version numbers with online resources.\n    url = None  # Remote location of this problem.\n\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    _remote_check_cooldown_seconds = 1  # Seconds between remote check of report.\n    nL = 120  # Maximum line width\n    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.\n    _setup_mode = False # True if test is being run in setup-mode, i.e. will not fail because of bad configurations, etc.\n\n    @classmethod\n    def reset(cls):\n        for (q, _) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    @classmethod\n    def mfile(clc):\n        return inspect.getfile(clc)\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._file()), "unitgrade_data/main_config_"+ os.path.basename(self._file()[:-3]) + ".artifacts.pkl")\n\n    def _is_run_in_grade_mode(self):\n        """ True if this report is being run as part of a grade run. """\n        return self._file().endswith("_grade.py") # Not sure I love this convention.\n\n    def _import_base_relative(self):\n        if hasattr(self.pack_imports[0], \'__path__\'):\n            root_dir = self.pack_imports[0].__path__[0]\n        else:\n            root_dir = self.pack_imports[0].__file__\n\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n        relative_path = relative_path.replace("\\\\", "/")\n        return root_dir, relative_path, modules\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        for (q, _) in self.questions:\n            q.nL = self.nL  # Set maximum line length.\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        loader = unittest.TestLoader()\n        for q, _ in self.questions:\n            start = time.time()  #\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self, with_coverage=False, verbose=True):\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = True\n                q._report = self\n        for q, _ in self.questions:\n            q._setup_answers_mode = True\n            # q._generate_artifacts = False # Disable artifact generation when the report is being set up.\n\n        evaluate_report_student(self, unmute=verbose, noprogress=not verbose, generate_artifacts=False) # Disable artifact generation.\n\n        # self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in 
self.questions:\n            # print(self.questions)\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                # print("q is", q())\n                report_cache[q.__qualname__] = q._cache2\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in framework.py\': True}\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = False\n\n        # report_cache is saved on a per-question basis.\n        # it could also contain additional information such as runtime metadata etc. This may not be appropriate to store with the invidivual questions(?).\n        # In this case, the function should be re-defined.\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n        self._config = payloads[\'config\']\n\n    def _check_remote_versions(self):\n        if self.url is None:\n            return\n        url = self.url\n        if not url.endswith("/"):\n            url += "/"\n        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"\n        if os.path.isfile(snapshot_file):\n            with open(snapshot_file, \'r\') as f:\n                t = f.read()\n                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:\n                    return\n\n        if self.url.startswith("https://gitlab"):\n            # Try to turn url into a \'raw\' format.\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            # url = self.url\n            url = url.replace("-/tree", "-/raw")\n            # print(url)\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/tree/master/examples/autolab_example_py_upload/instructor/cs102_autolab"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/report2_test.py?inline=false"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")\n            # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n            if self._is_run_in_grade_mode():\n                remote_source = requests.get(raw_url).text\n                with open(self._file(), \'r\') as f:\n                    local_source = f.read()\n                if local_source != remote_source:\n                    print("\\nThe local version of this report is not identical to the remote version which can be found at")\n                    print(self.url)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of grade script does not match the remote version. 
Please update using git pull")\n            else:\n                text = requests.get(raw_url).text\n                node = ast.parse(text)\n                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                for b in classes.body:\n                    # print(b.)\n                    if b.targets[0].id == "version":\n                        # print(b)\n                        # print(b.value)\n                        version_remote = b.value.value\n                        break\n                if version_remote != self.version:\n                    print("\\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of test on remote is {version_remote}, which is different than this version of the test {self.version}. Please update your test to the most recent version.")\n\n                for (q,_) in self.questions:\n                    qq = q(skip_remote_check=True)\n                    cfile = q._cache_file()\n\n                    relpath = os.path.relpath(cfile, os.path.dirname(self._file()))\n                    relpath = relpath.replace("\\\\", "/")\n                    raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")\n                    # requests.get(raw_url)\n\n                    with open(cfile, \'rb\') as f:\n                        b1 = f.read()\n\n                    b2 = requests.get(raw_url).content\n                    if b1 != b2:\n                        print("\\nQuestion ", qq.title, "relies on the data file", cfile)\n                        print("However, it appears that this file is missing or in a different version than the most recent found here:")\n                        print(self.url)\n                        print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                        print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                        print("This can be done by simply running the command")\n                        print("> git pull")\n                        print("to avoid running bad tests against good code, the program will now stop. Please update and good luck!")\n                        raise Exception("The data file for the question", qq.title, "did not match remote source found on git. The test will therefore automatically fail. 
Please update your test/data files.")\n\n                t = time.time()\n                if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):\n                    with open(snapshot_file, \'w\') as f:\n                        f.write(f"{t}")\n\ndef get_hints(ss):\n    """ Extract all blocks of the forms:\n\n    Hints:\n    bla-bla.\n\n    and returns the content unaltered.\n    """\n    if ss == None:\n        return None\n    try:\n        ss = textwrap.dedent(ss)\n        ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n        hints = ["hints:", "hint:"]\n        indexes = [ss.lower().find(h) for h in hints]\n        j = np.argmax(indexes)\n        if indexes[j] == -1:\n            return None\n        h = hints[j]\n        ss = ss[ss.lower().find(h) + len(h) + 1:]\n        ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n        ss = textwrap.dedent(ss).strip()\n        # if ss.startswith(\'*\'):\n        #     ss = ss[1:].strip()\n        return ss\n    except Exception as e:\n        print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n    # a = 234\n    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache. Ensures method always produce same result.\n    _cache2 = None  # User-written cache.\n    _with_coverage = False\n    _covcache = None # Coverage cache. Written to if _with_coverage is true.\n    _report = None  # The report used. This is very, very hacky and should always be None. Don\'t rely on it!\n    _run_in_report_mode = True\n\n    _generate_artifacts = True # Whether the file will generate the artifact .json files. This is used in the _grade-script mode.\n    # If true, the tests will not fail when cache is used. 
This is necesary since otherwise the cache will not be updated\n    # during setup, and the deploy script must be run many times.\n    _setup_answers_mode = False\n\n    def capture(self):\n        if hasattr(self, \'_stdout\') and self._stdout is not None:\n            file = self._stdout\n        else:\n            file = sys.stdout\n        return Capturing2(stdout=file)\n\n    @classmethod\n    def question_title(cls):\n        """ Return the question title """\n        if cls.__doc__ is not None:\n            title = cls.__doc__.strip().splitlines()[0].strip()\n            if not (title.startswith("Hints:") or title.startswith("Hint:") ):\n                return title\n        return cls.__qualname__\n\n    def run(self, result):\n        # print("Run called in test framework...", self._generate_artifacts)\n        if not self._generate_artifacts:\n            return super().run(result)\n        from unittest.case import TestCase\n\n\n        db = DKPupDB(self._artifact_file())\n        db.set("state", "running")\n        db.set(\'run_id\', np.random.randint(1000*1000))\n        db.set(\'coverage_files_changed\', None)\n\n\n        _stdout = sys.stdout\n        _stderr = sys.stderr\n\n        std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n        # stderr_capture = StdCapturing(sys.stderr, db=db)\n        # std_err_capture = StdCapturing(sys.stderr, "stderr", db=db)\n        state_ = None\n        try:\n            # Run this unittest and record all of the output.\n            # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n            # sys.stdout = stdout_capture\n            sys.stderr = std_capture.dummy_stderr\n            sys.stdout = std_capture.dummy_stdout\n\n            result_ = TestCase.run(self, result)\n\n            from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n            # print(result_._excinfo[0])\n            actual_errors = []\n            for test, err in self._error_fed_during_run:\n                if err is None:\n                    continue\n                else:\n                    import traceback\n                    # traceback.print_tb(err[2])\n                    actual_errors.append(err)\n\n            if len(actual_errors) > 0:\n                ex, exi, tb = actual_errors[0]\n                exi.__traceback__ = tb\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                # db.set(\'state\', \'fail\')\n                state_ = "fail"\n            else:\n                state_ = "pass"\n        except Exception as e:\n            state_ = "fail"\n            import traceback\n            traceback.print_exc()\n            raise e\n        finally:\n            db.set(\'state\', state_)\n            std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n            sys.stdout = _stdout\n            sys.stderr = _stderr\n            std_capture.close()\n        return result_\n\n    def _callSetUp(self):\n        if self._with_coverage:\n            if self._covcache is None:\n                self._covcache = {}\n            import coverage\n            self.cov = coverage.Coverage(data_file=None)\n            self.cov.start()\n        self.setUp()\n\n    def _callTearDown(self):\n        self.tearDown()\n       
 # print("Teardown.")\n        if self._with_coverage:\n            # print("with cov")\n            from pathlib import Path\n            from snipper import snipper_main\n            try:\n                self.cov.stop()\n            except Exception as e:\n                print("Something went wrong while tearing down coverage test")\n                print(e)\n            data = self.cov.get_data()\n            base, _, _ = self._report._import_base_relative()\n            for file in data.measured_files():\n                file = os.path.normpath(file)\n                root = Path(base)\n                child = Path(file)\n                if root in child.parents:\n                    # print("Reading file", child)\n                    with open(child, \'r\') as f:\n                        s = f.read()\n                    lines = s.splitlines()\n                    garb = \'GARBAGE\'\n                    lines2 = snipper_main.censor_code(lines, keep=True)\n                    # print("\\n".join(lines2))\n                    if len(lines) != len(lines2):\n                        for k in range(len(lines)):\n                            print(k, ">", lines[k], "::::::::", lines2[k])\n                        print("Snipper failure; line lenghts do not agree. Exiting..")\n                        print(child, "len(lines) == len(lines2)", len(lines), len(lines2))\n                        import sys\n                        sys.exit()\n\n                    assert len(lines) == len(lines2)\n                    for ll in data.contexts_by_lineno(file):\n                        l = ll-1\n                        if l < len(lines2) and lines2[l].strip() == garb:\n                            # print("Got a hit at l", l)\n                            rel = os.path.relpath(child, root)\n                            cc = self._covcache\n                            j = 0\n                            for j in range(l, -1, -1):\n                                if "def" in lines2[j] or "class" in lines2[j]:\n                                    break\n                            from snipper.legacy import gcoms\n\n                            fun = lines2[j]\n                            comments, _ = gcoms("\\n".join(lines2[j:l]))\n                            if rel not in cc:\n                                cc[rel] = {}\n                            cc[rel][fun] = (l, "\\n".join(comments))\n                            # print("found", rel, fun)\n                            self._cache_put((self.cache_id(), \'coverage\'), self._covcache)\n\n    def shortDescriptionStandard(self):\n        sd = super().shortDescription()\n        if sd is None or sd.strip().startswith("Hints:") or sd.strip().startswith("Hint:"):\n            sd = self._testMethodName\n        return sd\n\n    def shortDescription(self):\n        sd = self.shortDescriptionStandard()\n        title = self._cache_get((self.cache_id(), \'title\'), sd)\n        return title if title is not None else sd\n\n    @property\n    def title(self):\n        return self.shortDescription()\n\n    @title.setter\n    def title(self, value):\n        self._cache_put((self.cache_id(), \'title\'), value)\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        self._ensure_cache_exists()  # Make sure cache is there.\n        if 
self._testMethodDoc is not None:\n            self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n        self._cache2[(self.cache_id(), \'assert\')] = {}\n        res = testMethod()\n        elapsed = time.time() - t\n        self._get_outcome()[ (self.cache_id(), "return") ] = res\n        self._cache_put((self.cache_id(), "time"), elapsed)\n\n\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return c, m\n\n    def __init__(self, *args, skip_remote_check=False, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self._assert_cache_index = 0\n        # Perhaps do a sanity check here to see if the cache is up to date? To do that, we must make sure the\n        # cache exists locally.\n        # Find the report class this class is defined within.\n        if skip_remote_check:\n            return\n        import importlib, inspect\n        found_reports = []\n        # print("But do I have report", self._report)\n        # print("I think I am module", self.__module__)\n        # print("Importlib says", importlib.import_module(self.__module__))\n        # This will delegate you to the wrong main clsas when running in grade mode.\n        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):\n            # print("checking", cls)\n            if issubclass(cls, Report):\n                for q,_ in cls.questions:\n                    if q == self.__class__:\n                        found_reports.append(cls)\n        if len(found_reports) == 0:\n            pass # This case occurs when the report _grade script is being run.\n            # raise Exception("This question is not a member of a report. Very, very odd.")\n        if len(found_reports) > 1:\n            raise Exception("This question is a member of multiple reports. That should not be the case -- don\'t get too creative.")\n        if len(found_reports) > 0:\n            report = found_reports[0]\n            report()._check_remote_versions()\n\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def get_expected_test_value(self):\n        key = (self.cache_id(), \'assert\')\n        id = self._assert_cache_index\n        cache = self._cache_get(key)\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        return _expected\n\n    def wrap_assert(self, assert_fun, first, *args, **kwargs):\n        key = (self.cache_id(), \'assert\')\n        if not self._cache_contains(key):\n            print("Warning, framework missing", key)\n            self.__class__._cache[key] = {}  # A new dict. 
We manually insert it because we have to use that the dict is mutable.\n        cache = self._cache_get(key)\n        id = self._assert_cache_index\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        if not id in cache:\n            print("Warning, framework missing cache index", key, "id =", id, " - The test will be skipped for now.")\n            if self._setup_answers_mode:\n                _expected = first # Bypass by setting equal to first. This is in case multiple self.assertEqualC\'s are run in a row and have to be set.\n\n        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n        cache[id] = first\n        self._cache_put(key, cache)\n        self._assert_cache_index += 1\n        if not self._setup_answers_mode:\n            assert_fun(first, _expected, *args, **kwargs)\n        else:\n            try:\n                assert_fun(first, _expected, *args, **kwargs)\n            except Exception as e:\n                print("Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.", id)\n                print("> first", first)\n                print("> expected", _expected)\n                print(e)\n\n\n    def assertEqualC(self, first, msg=None):\n        self.wrap_assert(self.assertEqual, first, msg)\n\n    def _shape_equal(self, first, second):\n        a1 = np.asarray(first).squeeze()\n        a2 = np.asarray(second).squeeze()\n        msg = None\n        msg = "" if msg is None else msg\n        if len(msg) > 0:\n            msg += "\\n"\n        self.assertEqual(a1.shape, a2.shape, msg=msg + "Dimensions of input data does not agree.")\n        assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.\n        a1[np.isinf(a1)] = 0\n        a2[np.isinf(a2)] = 0\n        diff = np.abs(a1 - a2)\n        return diff\n\n    def assertLinf(self, first, second=None, tol=1e-5, msg=None):\n        """ Test in the L_infinity norm.\n        :param first:\n        :param second:\n        :param tol:\n        :param msg:\n        :return:\n        """\n        if second is None:\n            return self.wrap_assert(self.assertLinf, first, tol=tol, msg=msg)\n        else:\n            diff = self._shape_equal(first, second)\n            np.testing.assert_allclose(first, second, atol=tol)\n            \n            max_diff = max(diff.flat)\n            if max_diff >= tol:\n                from unittest.util import safe_repr\n                # msg = f\'{safe_repr(first)} != {safe_repr(second)} : Not equal within tolerance {tol}\'\n                # print(msg)\n                # np.testing.assert_almost_equal\n                # import numpy as np\n                print(f"|first - second|_max = {max_diff} > {tol} ")\n                np.testing.assert_almost_equal(first, second)\n                # If the above fail, make sure to throw an error:\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=f\'Not equal within tolerance {tol}\')\n\n    def assertL2(self, first, second=None, tol=1e-5, msg=None, relative=False):\n        if second is None:\n            return self.wrap_assert(self.assertL2, first, tol=tol, msg=msg, relative=relative)\n        else:\n            # We first test using numpys build-in testing method to see if one coordinate deviates a great 
deal.\n            # This gives us better output, and we know that the coordinate wise difference is lower than the norm difference.\n            if not relative:\n                np.testing.assert_allclose(first, second, atol=tol)\n            diff = self._shape_equal(first, second)\n            diff = ( ( np.asarray( diff.flatten() )**2).sum() )**.5\n\n            scale = (2/(np.linalg.norm(np.asarray(first).flat) + np.linalg.norm(np.asarray(second).flat)) ) if relative else 1\n            max_diff = diff*scale\n            if max_diff >= tol:\n                msg = "" if msg is None else msg\n                print(f"|first - second|_2 = {max_diff} > {tol} ")\n                # Deletage to numpy. Let numpy make nicer messages.\n                np.testing.assert_almost_equal(first, second) # This function does not take a msg parameter.\n                # Make sure to throw an error no matter what.\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=msg + f"Not equal within tolerance {tol}")\n\n    @classmethod\n    def _cache_file(cls):\n        return os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__ + ".pkl"\n\n    @classmethod\n    def _artifact_file_for_setUpClass(cls):\n        file = os.path.join(os.path.dirname(cls._cache_file()), ""+cls.__name__+"-setUpClass.json")\n        print("_artifact_file_for_setUpClass(cls): will return", file, "__class__", cls)\n        # cf = os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__\n        return file\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self.__class__._cache_file()), \'-\'.join(self.cache_id()) + ".json")\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self.__class__._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None:  # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self.__class__._cache_file()\n        if os.path.exists(cfile):\n            try:\n                with open(cfile, \'rb\') as f:\n                    data = pickle.load(f)\n                self.__class__._cache = data\n            except Exception as e:\n                print("Cache file did not exist:", cfile)\n                print(e)\n        else:\n            print("Warning! data file not found", cfile)\n\n    def _get_coverage_files(self):\n        key = (self.cache_id(), \'coverage\')\n        # CC = None\n        # if self._cache_contains(key):\n        return self._cache_get(key, []) # Anything wrong with the empty list?\n        # return CC\n\n    def _get_hints(self):\n        """\n            This code is run when the test is set up to generate the hints and store them in an artifact file. 
It may be beneficial to simple compute them beforehand\n            and store them in the local unitgrade pickle file. This code is therefore expected to superceede the alterative code later.\n        """\n        hints = []\n        # print("Getting hint")\n        key = (self.cache_id(), \'coverage\')\n        if self._cache_contains(key):\n            CC = self._cache_get(key)\n            # cl, m = self.cache_id()\n            # print("Getting hint using", CC)\n            # Insert newline to get better formatting.\n            # gprint(\n            #     f"\\n> An error occured during the test: {cl}.{m}. The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n            for file in CC:\n                rec = CC[file]\n                # gprint(f">   * {file}")\n                for l in rec:\n                    _, comments = CC[file][l]\n                    hint = get_hints(comments)\n\n                    if hint != None:\n                        hints.append((hint, file, l))\n\n        doc = self._testMethodDoc\n        # print("doc", doc)\n        if doc is not None:\n            hint = get_hints(self._testMethodDoc)\n            if hint is not None:\n                hints = [(hint, None, self.cache_id()[1])] + hints\n\n        return hints\n\n    def _feedErrorsToResult(self, result, errors):\n        """ Use this to show hints on test failure.\n        It feeds error to the result -- so if there are errors, they will crop up here\n        """\n        self._error_fed_during_run = errors.copy() # import to copy the error list.\n\n        # result._test._error_fed_during_run = errors.copy()\n\n        if not isinstance(result, UTextResult):\n            er = [e for e, v in errors if v != None]\n            # print("Errors are", errors)\n            if len(er) > 0:\n                hints = []\n                key = (self.cache_id(), \'coverage\')\n                if self._cache_contains(key):\n                    CC = self._cache_get(key)\n                    cl, m = self.cache_id()\n                    # Insert newline to get better formatting.\n                    gprint(f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n                    for file in CC:\n                        rec = CC[file]\n                        gprint(f">   * {file}")\n                        for l in rec:\n                            _, comments = CC[file][l]\n                            hint = get_hints(comments)\n\n                            if hint != None:\n                                hints.append((hint, file, l) )\n                            gprint(f">      - {l}")\n\n                er = er[0]\n\n                doc = er._testMethodDoc\n                # print("doc", doc)\n                if doc is not None:\n                    hint = get_hints(er._testMethodDoc)\n                    if hint is not None:\n                        hints = [(hint, None, self.cache_id()[1] )] + hints\n                if len(hints) > 0:\n                    # print(hints)\n                    for hint, file, method in hints:\n                        s = (f"\'{method.strip()}\'" if method is not None else "")\n                        if method is not None and file is not None:\n                            s += " in "\n                        try:\n                            s += (file.strip() if file is not None else "")\n                            gprint(">")\n                            gprint("> Hints (from " + s + ")")\n                            gprint(textwrap.indent(hint, ">   "))\n                        except Exception as e:\n                            print("Bad stuff in hints. ")\n                            print(hints)\n        # result._last_errors = errors\n        super()._feedErrorsToResult(result, errors)\n        b = 234\n\n    def startTestRun(self):\n        super().startTestRun()\n\nclass Required:\n    pass\n\nclass ParticipationTest(UTestCase,Required):\n    max_group_size = None\n    students_in_group = None\n    workload_assignment = {\'Question 1\': [1, 0, 0]}\n\n    def test_students(self):\n        pass\n\n    def test_workload(self):\n        pass\n\n# 817, 705\nclass NotebookTestCase(UTestCase):\n    notebook = None\n    _nb = None\n    @classmethod\n    def setUpClass(cls) -> None:\n        with Capturing():\n            cls._nb = importnb.Notebook.load(cls.notebook)\n\n    @property\n    def nb(self):\n        return self.__class__._nb\n # 870.\n\nimport hashlib\nimport io\nimport tokenize\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,\n                            show_tol_err=False, show_privisional=True, noprogress=None,\n                            generate_artifacts=True):\n    args = parser.parse_args()\n    if noprogress is None:\n        noprogress = args.noprogress\n\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,\n                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err,\n                                          generate_artifacts=generate_artifacts)\n\n\n    if question is None and show_privisional:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        # testcase_methods = list(testCaseClass.__dict__.keys())\n        ls = []\n        for C in testCaseClass.mro():\n            if issubclass(C, unittest.TestCase):\n                ls = list(C.__dict__.keys()) + ls\n        testcase_methods = ls\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    generate_artifacts=True, # Generate the artifact .json files. These are exclusively used by the dashboard.\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n    # print("Started: " + dt_string)\n    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += f" version {report.version}"\n    print(s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n        if not report.abbreviate_questions:\n            q_title_print = "Question %i: %s"%(n+1, qtitle)\n        else:\n            q_title_print = "q%i) %s" % (n + 1, qtitle)\n\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        # q_ = {} # Gather score in this class.\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n        UTextResult.number = n\n        UTextResult.nL = report.nL\n        UTextResult.unmute = unmute # Hacky as well.\n        UTextResult.setUpClass_time = q._cache.get(((q.__name__, \'setUpClass\'), \'time\'), 3) if hasattr(q, \'_cache\') and q._cache is not None else 3\n\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        details = {}\n        for s, msg in res.successes + res.failures + res.errors:\n            # from unittest.suite import _ErrorHolder\n          
  # from unittest import _Err\n            # if isinstance(s, _ErrorHolder)\n            if hasattr(s, \'_testMethodName\'):\n                key = (q.__name__, s._testMethodName)\n            else:\n                # In case s is an _ErrorHolder (unittest.suite)\n                key = (q.__name__, s.id())\n            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.\n\n            detail = {}\n            if (s,msg) in res.successes:\n                detail[\'status\'] = "pass"\n            elif (s,msg) in res.failures:\n                detail[\'status\'] = \'fail\'\n            elif (s,msg) in res.errors:\n                detail[\'status\'] = \'error\'\n            else:\n                raise Exception("Status not known.")\n\n            nice_title = s.title\n            detail = {**detail, **msg, \'nice_title\': nice_title}#[\'message\'] = msg\n            details[key] = detail\n\n        # q_[s._testMethodName] = ("pass", None)\n        # for (s,msg) in res.failures:\n        #     q_[s._testMethodName] = ("fail", msg)\n        # for (s,msg) in res.errors:\n        #     q_[s._testMethodName] = ("error", msg)\n        # res.successes[0]._get_outcome()\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': details, \'title\': qtitle, \'name\': q.__name__,\n                   }\n        q.obtained = obtained\n        q.possible = possible\n        # print(q._cache)\n        # print(q._covcache)\n        s1 = f" * q{n+1})   Total"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",\n           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\ndef python_code_str_id(python_code, strip_comments_and_docstring=True):\n    s = python_code\n\n    if strip_comments_and_docstring:\n        try:\n            s = remove_comments_and_docstrings(s)\n        except Exception as e:\n            print("--"*10)\n            print(python_code)\n            print(e)\n\n    s = "".join([c.strip() for c in s.split()])\n    hash_object = hashlib.blake2b(s.encode())\n    return hash_object.hexdigest()\n\n\ndef file_id(file, strip_comments_and_docstring=True):\n    with open(file, \'r\') as f:\n        # s = f.read()\n        return python_code_str_id(f.read())\n\n\ndef remove_comments_and_docstrings(source):\n    """\n    Returns \'source\' minus comments and docstrings.\n    """\n    io_obj = io.StringIO(source)\n    out = ""\n    prev_toktype = tokenize.INDENT\n    last_lineno = -1\n    last_col = 0\n    for tok in tokenize.generate_tokens(io_obj.readline):\n        token_type = tok[0]\n        token_string = tok[1]\n        start_line, start_col = tok[2]\n        end_line, end_col = tok[3]\n        ltext = tok[4]\n        # The following two conditionals preserve indentation.\n        # This is necessary because we\'re not using tokenize.untokenize()\n        # (because it spits out code with copious amounts of oddly-placed\n        # whitespace).\n        if start_line > last_lineno:\n            last_col = 0\n        if start_col > last_col:\n            out += (" " * (start_col - last_col))\n        # Remove comments:\n        if token_type == tokenize.COMMENT:\n            pass\n        # This series of conditionals removes docstrings:\n        elif token_type == tokenize.STRING:\n            if prev_toktype != tokenize.INDENT:\n        # This is likely a docstring; double-check we\'re not inside an operator:\n                if prev_toktype != tokenize.NEWLINE:\n                    # Note regarding NEWLINE vs NL: The tokenize module\n                    # differentiates between newlines that start a new statement\n                    # and newlines inside of operators such as parens, brackes,\n                    # and curly braces.  
Newlines inside of operators are\n                    # NEWLINE and newlines that start new code are NL.\n                    # Catch whole-module docstrings:\n                    if start_col > 0:\n                        # Unlabelled indentation means we\'re inside an operator\n                        out += token_string\n                    # Note regarding the INDENT token: The tokenize module does\n                    # not label indentation inside of an operator (parens,\n                    # brackets, and curly braces) as actual indentation.\n                    # For example:\n                    # def foo():\n                    #     "The spaces before this docstring are tokenize.INDENT"\n                    #     test = [\n                    #         "The spaces before this string do not get a token"\n                    #     ]\n        else:\n            out += token_string\n        prev_toktype = token_type\n        last_col = end_col\n        last_lineno = end_line\n    return out\n\nimport textwrap\nimport bz2\nimport pickle\nimport os\nimport zipfile\nimport io\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    f = m.__file__\n    if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'):\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        im = __import__(m.__name__.split(\'.\')[0])\n        if isinstance(im, list):\n            print("im is a list")\n            print(im)\n        # the __path__ attribute *may* be a string in some cases. I had to fix this.\n        print("path.:",  __import__(m.__name__.split(\'.\')[0]).__path__)\n        # top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__[0]\n        module_import = False\n\n    found_hashes = {}\n    # pycode = {}\n    resources[\'pycode\'] = {}\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)\n                    zip.write(fpath, v)\n                    if not fpath.endswith("_grade.py"): # Exclude grade files.\n                        with open(fpath, \'r\') as f:\n                            s = f.read()\n                        found_hashes[v] = python_code_str_id(s)\n                        resources[\'pycode\'][v] = s\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    resources[\'blake2b_file_hashes\'] = found_hashes\n    return resources, top_package\n\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_report_source_include(report):\n    sources = {}\n    # print("")\n    # if not args.autolab:\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            _, report_relative_location, module_import = report._import_base_relative()\n\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'report_module_specification\'] = module_import\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            print(f" * {m.__name__}")\n    return sources\n\ndef gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):\n    # n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n                                          show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab,\n                                          generate_artifacts=False,\n                                          )\n    print("")\n    sources = {}\n    if not args.autolab:\n        results[\'sources\'] = sources = gather_report_source_include(report)\n\n    token_plain = """\n# This file contains your results. Do not edit its content. Simply upload it as it is. 
"""\n\n    s_include = [token_plain]\n    known_hashes = []\n    cov_files = []\n    use_coverage = True\n    if report._config is not None:\n        known_hashes = report._config[\'blake2b_file_hashes\']\n        for Q, _ in report.questions:\n            use_coverage = use_coverage and isinstance(Q, UTestCase)\n            for key in Q._cache:\n                if len(key) >= 2 and key[1] == "coverage":\n                    for f in Q._cache[key]:\n                        cov_files.append(f)\n\n    for s in sources.values():\n        for f_rel, hash in s[\'blake2b_file_hashes\'].items():\n            if hash in known_hashes and f_rel not in cov_files and use_coverage:\n                print("Skipping", f_rel)\n            else:\n                if token_include_plaintext_source:\n                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)\n                    s_include.append("")\n                    s_include.append(s[\'pycode\'][f_rel])\n                    s_include.append("")\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = f"_v{report.version}" if report.version is not None else ""\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.normpath(os.path.join(output_dir, token))\n\n    save_token(results, "\\n".join(s_include), token)\n\n    if not args.autolab:\n        print("> Testing token file integrity...", sep="")\n        load_token(token)\n        print("Done!")\n        print(" ")\n        print("To get credit for your results, please upload the single unmodified file: ")\n        print(">", token)\n\n\ndef save_token(dictionary, plain_text, file_out):\n    if plain_text is None:\n        plain_text = ""\n    if len(plain_text) == 0:\n        plain_text = "Start token file"\n    plain_text = plain_text.strip()\n    b, b_hash = dict2picklestring(dictionary)\n    b_l1 = len(b)\n    b = "."+b+"."\n    b = "\\n".join( textwrap.wrap(b, 180))\n\n    out = [plain_text, token_sep, f"{b_hash} {b_l1}", token_sep, b]\n    with open(file_out, \'w\') as f:\n        f.write("\\n".join(out))\n\n\n\n\ndef source_instantiate(name, report1_source, payload):\n    # print("Executing sources", report1_source)\n    eval("exec")(report1_source, globals())\n    # print("Loaind gpayload..")\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    return report\n\n\n__version__ = "0.1.28.8"\n\nfrom cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo\nimport time\nimport numpy as np\nimport pickle\nimport os\n# from unitgrade.framework import dash\n\ndef mk_bad():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 100, \'x2\': 300}\n        pickle.dump(d, f)\n\ndef mk_ok():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 1, \'x2\': 2}\n        pickle.dump(d, f)\n\nclass Numpy(UTestCase):\n    z = 234\n\n    # def __getattr__(self, item):\n    #     print("hi there ", item)\n    #     return super().__getattr__(item)\n    #\n    # def __getattribute__(self, item):\n    #     print("oh hello sexy. 
", item)\n    #     return super().__getattribute__(item)\n\n    @classmethod_dashboard\n    def setUpClass(cls) -> None:\n        print("Dum di dai, I am running some setup code here.")\n        for i in range(10):\n            print("Hello world", i)\n        print("Set up.") # must be handled seperately.\n        # assert False\n\n    # @cache\n    # def make_primes(self, n):\n    #     return primes(n)\n\n    # def setUp(self) -> None:\n    #     print("We are doing the setup thing.")\n\n    def test_bad(self):\n        """\n        Hints:\n            * Remember to properly de-indent your code.\n            * Do more stuff which works.\n        """\n        # raise Exception("This ended poorly")\n        # print("Here we go")\n        # return\n        # self.assertEqual(1, 1)\n        with open(os.path.dirname(__file__)+"/db.pkl", \'rb\') as f:\n            d = pickle.load(f)\n        # print(d)\n        # assert False\n        # for i in range(10):\n        from tqdm import tqdm\n        for i in tqdm(range(100)):\n            # print("The current number is", i)\n            time.sleep(.01)\n        self.assertEqual(1, d[\'x1\'])\n        for b in range(10):\n            self.assertEqualC(add(3, b))\n\n\n    def test_weights(self):\n        """\n            Hints:\n            * Try harder!\n            * Check the chapter on linear regression.\n        """\n        n = 3\n        m = 2\n        np.random.seed(5)\n        # from numpy import asdfaskdfj\n        # X = np.random.randn(n, m)\n        # y = np.random.randn(n)\n        foo()\n        # assert 2 == 3\n        # raise Exception("Bad exit")\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertL2(linear_regression_weights(X, y), msg="the message")\n        self.assertEqual(1, 1)\n        # self.assertEqual(1,2)\n        return "THE RESULT OF THE TEST"\n\n\nclass AnotherTest(UTestCase):\n    def test_more(self):\n        self.assertEqual(2,2)\n\n    def test_even_more(self):\n        self.assertEqual(2,2)\n\nimport cs108\nclass Report2(Report):\n    title = "CS 101 Report 2"\n    questions = [\n        (Numpy, 10), (AnotherTest, 20)\n        ]\n    pack_imports = [cs108]'
-report1_payload = '80049502030000000000007d94288c054e756d7079947d942868018c0a7365745570436c6173739486948c0474696d65948694473f38e8000000000068018c08746573745f6261649486948c057469746c6594869468076801680786948c066173736572749486947d94284b004b034b014b044b024b054b034b064b044b074b054b084b064b094b074b0a4b084b0b4b094b0c7568016807869468058694473ff06c5e0000000068018c0c746573745f77656967687473948694680986946811680168118694680c86947d9468016811869468058694473efa400000000000758c0b416e6f7468657254657374947d942868196803869468058694473f1470000000000068198c09746573745f6d6f7265948694680c86947d946819681d869468058694473ed700000000000068198c0e746573745f6576656e5f6d6f7265948694680c86947d9468196823869468058694473ed5000000000000758c06636f6e666967947d948c13626c616b6532625f66696c655f686173686573945d94288c806362363363336235383635306636313037643763663138646136303635666135373835666261626564643135316639653761633335313139323635623039393838623266653335373632303961333932616133656236633134636131316439646335393937343831633531373863313533393665656662313539653163373536948c803434656331613338643134373639626433653234323663386232366539303830356336313361386161653266333966663665633433363133666562363465303739373435323062306536353134353063303637623763633637636631366134313835653736346334383331373763333335303063626563626362336234646466948c803638306336353638323633623832303737313365616434306539323663643265363835336130613936353861386338343738393564363633643730643262343666616163333336396133636564366239623964303436346563316366656465326235306265376432626636313432313638383936663332306338353232313066946573752e'
+report1_source = '# from unitgrade import hide\n# from unitgrade import utils\n# import os\n# import lzma\n# import pickle\n\n# DONT\'t import stuff here since install script requires __version__\n\n# def cache_write(object, file_name, verbose=True):\n#     # raise Exception("bad")\n#     # import compress_pickle\n#     dn = os.path.dirname(file_name)\n#     if not os.path.exists(dn):\n#         os.mkdir(dn)\n#     if verbose: print("Writing cache...", file_name)\n#     with lzma.open(file_name, \'wb\', ) as f:\n#         pickle.dump(object, f)\n#     if verbose: print("Done!")\n#\n#\n# def cache_exists(file_name):\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     return os.path.exists(file_name)\n#\n#\n# def cache_read(file_name):\n#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     if os.path.exists(file_name):\n#         try:\n#             with lzma.open(file_name, \'rb\') as f:\n#                 return pickle.load(f)\n#         except Exception as e:\n#             print("Tried to load a bad pickle file at", file_name)\n#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n#             print(e)\n#             # return pickle.load(f)\n#     else:\n#         return None\n\n\n\nimport re\nimport sys\nimport threading\nimport time\nimport lzma\nimport hashlib\nimport pickle\nimport base64\nimport os\nfrom collections import namedtuple\nfrom io import StringIO\nimport numpy as np\nimport tqdm\nfrom colorama import Fore\nfrom functools import _make_key\nfrom diskcache import Cache\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef gprint(s):\n    print(f"{Fore.LIGHTGREEN_EX}{s}")\n\nmyround = lambda x: np.round(x)  # required for obfuscation.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n"""\nClean up the various output-related helper classes.\n"""\nclass Logger(object):\n    def __init__(self, buffer, write_to_stdout=True):\n        # assert False\n        self.terminal = sys.stdout\n        self.write_to_stdout = write_to_stdout\n        self.log = buffer\n\n    def write(self, message):\n        if self.write_to_stdout:\n            self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\n\nclass Capturing(list):\n    def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n        self._stdout = stdout\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True):  # don\'t put arguments here.\n        self._stdout = sys.stdout if self._stdout == None else self._stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO()  # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n    def __exit__(self, 
*args):\n        lines = self._stringio.getvalue().splitlines()\n        txt = "\\n".join(lines)\n        numbers = extract_numbers(rm_progress_bar(txt))\n        self.extend(lines)\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n        self.output = txt\n        self.numbers = numbers\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct + 1)\n            if i > 0 and l.find("|", i + 1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None, mute_stdout=False):\n        if file == None:\n            file = sys.stdout\n        self.file = file\n        self.mute_stdout = mute_stdout\n        self._running = False\n        self.title = title\n        self.dt = 0.025\n        self.n = max(1, int(np.round(t / self.dt)))\n        self.show_progress_bar = show_progress_bar\n        self.pbar = None\n\n        if start:\n            self.start()\n\n    def start(self):\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            self._stdout = sys.stdout\n            sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            print("Stopping a progress bar which is not running (class unitgrade.utils.ActiveProgress")\n            pass\n            # raise Exception("Stopping a stopped progress bar. 
")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        self.file.flush()\n\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            sys.stdout = self._stdout #= sys.stdout\n\n            # sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n        t_ = time.time()\n        for _ in range(self.n - 1):  # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n            tc = time.time()\n            tic = max(0, self.dt - (tc - t_))\n            if tic > 0:\n                time.sleep(tic)\n            t_ = time.time()\n            self.pbar.update(1)\n\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n    if file == None:\n        file = sys.stdout\n    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n    print(first + dot_parts, end="", file=file)\n    last += extra\n    print(last, file=file)\n\n\ndef hide(func):\n    return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    return newDecorator\n\n\nhide = makeRegisteringDecorator(hide)\n\n\ndef extract_numbers(txt):\n    numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))\n    return all\n\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n        # print(self._cache.keys())\n        # for k in self._cache:\n        #     print(k)\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n            # This appears to be required since there are two caches. 
Otherwise, when deploy method is run twice,\n            # the cache will not be set correctly.\n            self._cache_put(key, value)\n        return value\n\n    return wrapper\n\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n""" Methods responsible for turning a dictionary into a string that can be pickled or put into a json file. """\ndef dict2picklestring(dd):\n    """\n    Turns a dictionary into a string with some compression.\n\n    :param dd:\n    :return:\n    """\n    b = lzma.compress(pickle.dumps(dd))\n    b_hash = hashlib.blake2b(b).hexdigest()\n    return base64.b64encode(b).decode("utf-8"), b_hash\n\ndef picklestring2dict(picklestr):\n    """ Reverse of the above method: Turns the string back into a dictionary. """\n    b = base64.b64decode(picklestr)\n    hash = hashlib.blake2b(b).hexdigest()\n    dictionary = pickle.loads(lzma.decompress(b))\n    return dictionary, hash\n\ntoken_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70\ndef load_token(file_in):\n    """ We put this one here to allow loading of token files for the dashboard. """\n    with open(file_in, \'r\') as f:\n        s = f.read()\n    splt = s.split(token_sep)\n    data = splt[-1]\n    info = splt[-2]\n    head = token_sep.join(splt[:-2])\n    plain_text=head.strip()\n    hash, l1 = info.split(" ")\n    data = "".join( data.strip()[1:-1].splitlines() )\n    l1 = int(l1)\n    dictionary, b_hash = picklestring2dict(data)\n    assert len(data) == l1\n    assert b_hash == hash.strip()\n    return dictionary, plain_text\n\n\n\n## Key/value store related.\nclass DKPupDB:\n    """ This key/value store store artifacts (associated with a specific question) in a dictionary. 
"""\n    def __init__(self, artifact_file, use_pupdb=False):\n        # Make a double-headed disk cache thingy.\n        self.dk = Cache(os.path.dirname(artifact_file)) # Start in this directory.\n        self.name_ = os.path.basename(artifact_file[:-5])\n        if self.name_ not in self.dk:\n            self.dk[self.name_] = dict()\n        self.use_pupdb = use_pupdb\n        if self.use_pupdb:\n            from pupdb.core import PupDB\n            self.db_ = PupDB(artifact_file)\n\n    def __setitem__(self, key, value):\n        if self.use_pupdb:\n            self.db_.set(key, value)\n        with self.dk.transact():\n            d = self.dk[self.name_]\n            d[key] = value\n            self.dk[self.name_] = d\n            self.dk[self.name_ + "-updated"] = True\n\n    def __getitem__(self, item):\n        v = self.dk[self.name_][item]\n        if self.use_pupdb:\n            v2 = self.db_.get(item)\n            if v != v2:\n                print("Mismatch v1, v2 for ", item)\n        return v\n\n    def keys(self): # This one is also deprecated.\n        return tuple(self.dk[self.name_].keys()) #.iterkeys())\n        # return self.db_.keys()\n\n    def set(self, item, value): # This one is deprecated.\n        self[item] = value\n\n    def get(self, item, default=None):\n        return self[item] if item in self else default\n\n    def __contains__(self, item):\n        return item in self.dk[self.name_] #keys()\n        # return item in self.dk\n\n\nimport io\nimport sys\nimport time\nimport unittest\nfrom unittest.runner import _WritelnDecorator\nimport numpy as np\n\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    number = -1  # HAcky way to set question number.\n    show_progress_bar = True\n    unmute = False # Whether to redirect stdout.\n    cc = None\n    setUpClass_time = 3 # Estimated time to run setUpClass in TestCase. Must be set externally. See key (("ClassName", "setUpClass"), "time") in _cache.\n\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # TODO: Fix here. probably also needs to flush stdout.\n        self.printErrorList(\'ERROR\', [(test, res[\'stderr\']) for test, res in self.errors])\n        self.printErrorList(\'FAIL\',  [(test, res[\'stderr\']) for test, res in self.failures])\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addError(test, err)\n        err = self.errors[-1][1]\n        if hasattr(sys.stdout, \'log\'):\n            stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        else:\n            stdout = ""\n        self.errors[-1] = (self.errors[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n\n        if not hasattr(self, \'item_title_print\'):\n            # In case setUpClass() fails with an error the short description may not be set. 
This will fix that problem.\n            self.item_title_print = test.shortDescription()\n            if self.item_title_print is None:  # In case the short description is not set either...\n                self.item_title_print = test.id()\n\n\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        err = self.failures[-1][1]\n        stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        self.failures[-1] = (self.failures[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n        self.cc_terminate(success=False)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        msg = None\n        stdout = sys.stdout.log.readlines() # Only works because we set sys.stdout to a unitgrade.Logger\n\n        if hasattr(test, \'_get_outcome\'):\n            o = test._get_outcome()\n            if isinstance(o, dict):\n                key = (test.cache_id(), "return")\n                if key in o:\n                    msg = test._get_outcome()[key]\n\n        # print(sys.stdout.readlines())\n        self.successes.append((test, None))  # (test, message) (to be consistent with failures and errors).\n        self.successes[-1] = (self.successes[-1][0], {\'return\': msg,\n                                 \'stdout\': stdout,\n                                 \'stderr\': None})\n\n        self.cc_terminate()\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            self.cc.file.flush()\n            ss = self.item_title_print\n\n            state = "PASS" if success else "FAILED"\n\n            dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n            if self.show_progress_bar or True:\n                print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n            else:\n                print(dot_parts, end="", file=self.cc.file)\n\n            if tsecs >= 0.5:\n                state += " (" + str(tsecs) + " seconds)"\n            print(state, file=self.cc.file)\n\n    def startTest(self, test):\n        name = test.__class__.__name__\n        if self.testsRun == 0 and hasattr(test.__class__, \'_cache2\'): # Disable this if the class is pure unittest.TestCase\n            # This is the first time we are running a test. i.e. 
we can time the time taken to call setupClass.\n            if test.__class__._cache2 is None:\n                test.__class__._cache2 = {}\n            test.__class__._cache2[((name, \'setUpClass\'), \'time\')] = time.time() - self.t_start\n\n        self.testsRun += 1\n        item_title = test.shortDescription()  # Better for printing (get from cache).\n\n        if item_title == None:\n            # For unittest framework where getDescription may return None.\n            item_title = self.getDescription(test)\n        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n        # if self.show_progress_bar or True:\n        estimated_time = test.__class__._cache.get(((name, test._testMethodName), \'time\'), 100) if hasattr(test.__class__, \'_cache\') else 4\n        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        # else:\n        #     print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n        self._test = test\n        # if not self.unmute:\n        self._stdout = sys.stdout # Redundant. remove later.\n        sys.stdout = Logger(io.StringIO(), write_to_stdout=self.unmute)\n\n    def stopTest(self, test):\n        # if not self.unmute:\n        buff = sys.stdout.log\n        sys.stdout = self._stdout # redundant.\n        buff.close()\n        super().stopTest(test)\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            self.t_start = time.time()\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.framework.py>"\n\n            cc = ActiveProgress(t=self.setUpClass_time, title=q_title_print, show_progress_bar=self.show_progress_bar, mute_stdout=not self.unmute)\n            self.cc = cc\n\n\n    def _restoreStdout(self):  # Used when setting up the test.\n        if self._previousTestClass is None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            if self.show_progress_bar:\n                print(self.cc.title, end="")\n            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        stream = io.StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\nimport importnb\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport inspect\nimport colorama\nimport unittest\nimport time\nimport textwrap\nimport urllib.parse\nimport requests\nimport ast\nimport numpy\nfrom unittest.case import TestCase\n\n\ncolorama.init(autoreset=True)  # auto resets your settings after every output\nnumpy.seterr(all=\'raise\')\n\ndef setup_dir_by_class(C, base_dir):\n    name = C.__class__.__name__\n    return base_dir, name\n\n\n_DASHBOARD_COMPLETED_MESSAGE = "Dashboard> Evaluation completed."\n\n# Consolidate this code.\nclass classmethod_dashboard(classmethod):\n    def __init__(self, f):\n        def dashboard_wrap(cls: UTestCase):\n            if not cls._generate_artifacts:\n        
        f(cls)\n                return\n            db = DKPupDB(cls._artifact_file_for_setUpClass())\n            r = np.random.randint(1000 * 1000)\n            db.set(\'run_id\', r)\n            db.set(\'coverage_files_changed\', None)\n\n            state_ = \'fail\'\n            try:\n                _stdout = sys.stdout\n                _stderr = sys.stderr\n                std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n                # Run this unittest and record all of the output.\n                # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n                # sys.stdout = stdout_capture\n                sys.stderr = std_capture.dummy_stderr\n                sys.stdout = std_capture.dummy_stdout\n                db.set("state", "running")\n                f(cls)\n                state_ = \'pass\'\n            except Exception as e:\n                from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n                state_ = \'fail\'\n                db.set(\'state\', state_)\n                exi = e\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                raise e\n            finally:\n                db.set(\'state\', state_)\n                std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n                sys.stdout = _stdout\n                sys.stderr = _stderr\n                std_capture.close()\n        super().__init__(dashboard_wrap)\n\nclass Report:\n    title = "report title"\n    abbreviate_questions = False # Should the test items start with \'Question ...\' or just be q1).\n    version = None # A version number of the report (1.0). Used to compare version numbers with online resources.\n    url = None  # Remote location of this problem.\n\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    _remote_check_cooldown_seconds = 1  # Seconds between remote check of report.\n    nL = 120  # Maximum line width\n    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.\n    _setup_mode = False # True if test is being run in setup-mode, i.e. will not fail because of bad configurations, etc.\n\n    @classmethod\n    def reset(cls):\n        for (q, _) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    @classmethod\n    def mfile(clc):\n        return inspect.getfile(clc)\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._file()), "unitgrade_data/main_config_"+ os.path.basename(self._file()[:-3]) + ".artifacts.pkl")\n\n    def _is_run_in_grade_mode(self):\n        """ True if this report is being run as part of a grade run. 
"""\n        return self._file().endswith("_grade.py") # Not sure I love this convention.\n\n    def _import_base_relative(self):\n        if hasattr(self.pack_imports[0], \'__path__\'):\n            root_dir = self.pack_imports[0].__path__[0]\n        else:\n            root_dir = self.pack_imports[0].__file__\n\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n        relative_path = relative_path.replace("\\\\", "/")\n        return root_dir, relative_path, modules\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        for (q, _) in self.questions:\n            q.nL = self.nL  # Set maximum line length.\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        loader = unittest.TestLoader()\n        for q, _ in self.questions:\n            start = time.time()  #\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self, with_coverage=False, verbose=True):\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = True\n                q._report = self\n        for q, _ in self.questions:\n            q._setup_answers_mode = True\n            # q._generate_artifacts = False # Disable artifact generation when the report is being set up.\n\n        evaluate_report_student(self, unmute=verbose, noprogress=not verbose, generate_artifacts=False) # Disable artifact generation.\n\n        # self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            # print(self.questions)\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                # print("q is", q())\n                report_cache[q.__qualname__] = q._cache2\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in framework.py\': True}\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = False\n\n        # report_cache is saved on a per-question basis.\n        # it could also contain additional information such as runtime metadata etc. 
This may not be appropriate to store with the invidivual questions(?).\n        # In this case, the function should be re-defined.\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n        self._config = payloads[\'config\']\n\n    def _check_remote_versions(self):\n        if self.url is None:\n            return\n        url = self.url\n        if not url.endswith("/"):\n            url += "/"\n        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"\n        if os.path.isfile(snapshot_file):\n            with open(snapshot_file, \'r\') as f:\n                t = f.read()\n                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:\n                    return\n\n        if self.url.startswith("https://gitlab"):\n            # Try to turn url into a \'raw\' format.\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            # url = self.url\n            url = url.replace("-/tree", "-/raw")\n            # print(url)\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/tree/master/examples/autolab_example_py_upload/instructor/cs102_autolab"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/report2_test.py?inline=false"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")\n            # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n            if self._is_run_in_grade_mode():\n                remote_source = requests.get(raw_url).text\n                with open(self._file(), \'r\') as f:\n                    local_source = f.read()\n                if local_source != remote_source:\n                    print("\\nThe local version of this report is not identical to the remote version which can be found at")\n                    print(self.url)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of grade script does not match the remote version. 
Please update using git pull")\n            else:\n                text = requests.get(raw_url).text\n                node = ast.parse(text)\n                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                for b in classes.body:\n                    # print(b.)\n                    if b.targets[0].id == "version":\n                        # print(b)\n                        # print(b.value)\n                        version_remote = b.value.value\n                        break\n                if version_remote != self.version:\n                    print("\\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of test on remote is {version_remote}, which is different than this version of the test {self.version}. Please update your test to the most recent version.")\n\n                for (q,_) in self.questions:\n                    qq = q(skip_remote_check=True)\n                    cfile = q._cache_file()\n\n                    relpath = os.path.relpath(cfile, os.path.dirname(self._file()))\n                    relpath = relpath.replace("\\\\", "/")\n                    raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")\n                    # requests.get(raw_url)\n\n                    with open(cfile, \'rb\') as f:\n                        b1 = f.read()\n\n                    b2 = requests.get(raw_url).content\n                    if b1 != b2:\n                        print("\\nQuestion ", qq.title, "relies on the data file", cfile)\n                        print("However, it appears that this file is missing or in a different version than the most recent found here:")\n                        print(self.url)\n                        print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                        print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                        print("This can be done by simply running the command")\n                        print("> git pull")\n                        print("to avoid running bad tests against good code, the program will now stop. Please update and good luck!")\n                        raise Exception("The data file for the question", qq.title, "did not match remote source found on git. The test will therefore automatically fail. 
Please update your test/data files.")\n\n                t = time.time()\n                if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):\n                    with open(snapshot_file, \'w\') as f:\n                        f.write(f"{t}")\n\ndef get_hints(ss):\n    """ Extract all blocks of the forms:\n\n    Hints:\n    bla-bla.\n\n    and returns the content unaltered.\n    """\n    if ss == None:\n        return None\n    try:\n        ss = textwrap.dedent(ss)\n        ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n        hints = ["hints:", "hint:"]\n        indexes = [ss.lower().find(h) for h in hints]\n        j = np.argmax(indexes)\n        if indexes[j] == -1:\n            return None\n        h = hints[j]\n        ss = ss[ss.lower().find(h) + len(h) + 1:]\n        ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n        ss = textwrap.dedent(ss).strip()\n        # if ss.startswith(\'*\'):\n        #     ss = ss[1:].strip()\n        return ss\n    except Exception as e:\n        print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n    # a = 234\n    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache. Ensures method always produce same result.\n    _cache2 = None  # User-written cache.\n    _with_coverage = False\n    _covcache = None # Coverage cache. Written to if _with_coverage is true.\n    _report = None  # The report used. This is very, very hacky and should always be None. Don\'t rely on it!\n    _run_in_report_mode = True\n\n    _generate_artifacts = True # Whether the file will generate the artifact .json files. This is used in the _grade-script mode.\n    # If true, the tests will not fail when cache is used. 
This is necesary since otherwise the cache will not be updated\n    # during setup, and the deploy script must be run many times.\n    _setup_answers_mode = False\n\n    def capture(self):\n        if hasattr(self, \'_stdout\') and self._stdout is not None:\n            file = self._stdout\n        else:\n            file = sys.stdout\n        return Capturing2(stdout=file)\n\n    @classmethod\n    def question_title(cls):\n        """ Return the question title """\n        if cls.__doc__ is not None:\n            title = cls.__doc__.strip().splitlines()[0].strip()\n            if not (title.startswith("Hints:") or title.startswith("Hint:") ):\n                return title\n        return cls.__qualname__\n\n    def run(self, result):\n        # print("Run called in test framework...", self._generate_artifacts)\n        if not self._generate_artifacts:\n            return super().run(result)\n\n        db = DKPupDB(self._artifact_file())\n        db.set("state", "running")\n        db.set(\'run_id\', np.random.randint(1000*1000))\n        db.set(\'coverage_files_changed\', None)\n\n\n        _stdout = sys.stdout\n        _stderr = sys.stderr\n\n        std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n        # stderr_capture = StdCapturing(sys.stderr, db=db)\n        # std_err_capture = StdCapturing(sys.stderr, "stderr", db=db)\n        state_ = None\n        try:\n            # Run this unittest and record all of the output.\n            # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n            # sys.stdout = stdout_capture\n            sys.stderr = std_capture.dummy_stderr\n            sys.stdout = std_capture.dummy_stdout\n\n            result_ = TestCase.run(self, result)\n\n            from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n            # print(result_._excinfo[0])\n            actual_errors = []\n            for test, err in self._error_fed_during_run:\n                if err is None:\n                    continue\n                else:\n                    import traceback\n                    # traceback.print_tb(err[2])\n                    actual_errors.append(err)\n\n            if len(actual_errors) > 0:\n                ex, exi, tb = actual_errors[0]\n                exi.__traceback__ = tb\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                # db.set(\'state\', \'fail\')\n                state_ = "fail"\n            else:\n                state_ = "pass"\n        except Exception as e:\n            state_ = "fail"\n            import traceback\n            traceback.print_exc()\n            raise e\n        finally:\n            db.set(\'state\', state_)\n            std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n            sys.stdout = _stdout\n            sys.stderr = _stderr\n            std_capture.close()\n        return result_\n\n    def _callSetUp(self):\n        if self._with_coverage:\n            if self._covcache is None:\n                self._covcache = {}\n            import coverage\n            self.cov = coverage.Coverage(data_file=None)\n            self.cov.start()\n        self.setUp()\n\n    def _callTearDown(self):\n        self.tearDown()\n        # print("Teardown.")\n        if 
self._with_coverage:\n            # print("with cov")\n            from pathlib import Path\n            from snipper import snipper_main\n            try:\n                self.cov.stop()\n            except Exception as e:\n                print("Something went wrong while tearing down coverage test")\n                print(e)\n            data = self.cov.get_data()\n            base, _, _ = self._report._import_base_relative()\n            for file in data.measured_files():\n                file = os.path.normpath(file)\n                root = Path(base)\n                child = Path(file)\n                if root in child.parents:\n                    # print("Reading file", child)\n                    with open(child, \'r\') as f:\n                        s = f.read()\n                    lines = s.splitlines()\n                    garb = \'GARBAGE\'\n                    lines2 = snipper_main.censor_code(lines, keep=True)\n                    # print("\\n".join(lines2))\n                    if len(lines) != len(lines2):\n                        for k in range(len(lines)):\n                            print(k, ">", lines[k], "::::::::", lines2[k])\n                        print("Snipper failure; line lenghts do not agree. Exiting..")\n                        print(child, "len(lines) == len(lines2)", len(lines), len(lines2))\n                        import sys\n                        sys.exit()\n\n                    assert len(lines) == len(lines2)\n                    for ll in data.contexts_by_lineno(file):\n                        l = ll-1\n                        if l < len(lines2) and lines2[l].strip() == garb:\n                            # print("Got a hit at l", l)\n                            rel = os.path.relpath(child, root)\n                            cc = self._covcache\n                            j = 0\n                            for j in range(l, -1, -1):\n                                if "def" in lines2[j] or "class" in lines2[j]:\n                                    break\n                            from snipper.legacy import gcoms\n\n                            fun = lines2[j]\n                            comments, _ = gcoms("\\n".join(lines2[j:l]))\n                            if rel not in cc:\n                                cc[rel] = {}\n                            cc[rel][fun] = (l, "\\n".join(comments))\n                            # print("found", rel, fun)\n                            self._cache_put((self.cache_id(), \'coverage\'), self._covcache)\n\n    def shortDescriptionStandard(self):\n        sd = super().shortDescription()\n        if sd is None or sd.strip().startswith("Hints:") or sd.strip().startswith("Hint:"):\n            sd = self._testMethodName\n        return sd\n\n    def shortDescription(self):\n        sd = self.shortDescriptionStandard()\n        title = self._cache_get((self.cache_id(), \'title\'), sd)\n        return title if title is not None else sd\n\n    @property\n    def title(self):\n        return self.shortDescription()\n\n    @title.setter\n    def title(self, value):\n        self._cache_put((self.cache_id(), \'title\'), value)\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        self._ensure_cache_exists()  # Make sure cache is there.\n        if self._testMethodDoc is not None:\n            
self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n        self._cache2[(self.cache_id(), \'assert\')] = {}\n        res = testMethod()\n        elapsed = time.time() - t\n        self._get_outcome()[ (self.cache_id(), "return") ] = res\n        self._cache_put((self.cache_id(), "time"), elapsed)\n\n\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return c, m\n\n    def __init__(self, *args, skip_remote_check=False, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self._assert_cache_index = 0\n        # Perhaps do a sanity check here to see if the cache is up to date? To do that, we must make sure the\n        # cache exists locally.\n        # Find the report class this class is defined within.\n        if skip_remote_check:\n            return\n        import importlib, inspect\n        found_reports = []\n        # print("But do I have report", self._report)\n        # print("I think I am module", self.__module__)\n        # print("Importlib says", importlib.import_module(self.__module__))\n        # This will delegate you to the wrong main clsas when running in grade mode.\n        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):\n            # print("checking", cls)\n            if issubclass(cls, Report):\n                for q,_ in cls.questions:\n                    if q == self.__class__:\n                        found_reports.append(cls)\n        if len(found_reports) == 0:\n            pass # This case occurs when the report _grade script is being run.\n            # raise Exception("This question is not a member of a report. Very, very odd.")\n        if len(found_reports) > 1:\n            raise Exception("This question is a member of multiple reports. That should not be the case -- don\'t get too creative.")\n        if len(found_reports) > 0:\n            report = found_reports[0]\n            report()._check_remote_versions()\n\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def get_expected_test_value(self):\n        key = (self.cache_id(), \'assert\')\n        id = self._assert_cache_index\n        cache = self._cache_get(key)\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        return _expected\n\n    def wrap_assert(self, assert_fun, first, *args, **kwargs):\n        key = (self.cache_id(), \'assert\')\n        if not self._cache_contains(key):\n            print("Warning, framework missing", key)\n            self.__class__._cache[key] = {}  # A new dict. We manually insert it because we have to use that the dict is mutable.\n        cache = self._cache_get(key)\n        id = self._assert_cache_index\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. 
Please run deploy()")\n        if not id in cache:\n            print("Warning, framework missing cache index", key, "id =", id, " - The test will be skipped for now.")\n            if self._setup_answers_mode:\n                _expected = first # Bypass by setting equal to first. This is in case multiple self.assertEqualC\'s are run in a row and have to be set.\n\n        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n        cache[id] = first\n        self._cache_put(key, cache)\n        self._assert_cache_index += 1\n        if not self._setup_answers_mode:\n            assert_fun(first, _expected, *args, **kwargs)\n        else:\n            try:\n                assert_fun(first, _expected, *args, **kwargs)\n            except Exception as e:\n                print("Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.", id)\n                print("> first", first)\n                print("> expected", _expected)\n                print(e)\n\n\n    def assertEqualC(self, first, msg=None):\n        self.wrap_assert(self.assertEqual, first, msg)\n\n    def _shape_equal(self, first, second):\n        a1 = np.asarray(first).squeeze()\n        a2 = np.asarray(second).squeeze()\n        msg = None\n        msg = "" if msg is None else msg\n        if len(msg) > 0:\n            msg += "\\n"\n        self.assertEqual(a1.shape, a2.shape, msg=msg + "Dimensions of input data does not agree.")\n        assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.\n        a1[np.isinf(a1)] = 0\n        a2[np.isinf(a2)] = 0\n        diff = np.abs(a1 - a2)\n        return diff\n\n    def assertLinf(self, first, second=None, tol=1e-5, msg=None):\n        """ Test in the L_infinity norm.\n        :param first:\n        :param second:\n        :param tol:\n        :param msg:\n        :return:\n        """\n        if second is None:\n            return self.wrap_assert(self.assertLinf, first, tol=tol, msg=msg)\n        else:\n            diff = self._shape_equal(first, second)\n            np.testing.assert_allclose(first, second, atol=tol)\n            \n            max_diff = max(diff.flat)\n            if max_diff >= tol:\n                from unittest.util import safe_repr\n                # msg = f\'{safe_repr(first)} != {safe_repr(second)} : Not equal within tolerance {tol}\'\n                # print(msg)\n                # np.testing.assert_almost_equal\n                # import numpy as np\n                print(f"|first - second|_max = {max_diff} > {tol} ")\n                np.testing.assert_almost_equal(first, second)\n                # If the above fail, make sure to throw an error:\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=f\'Not equal within tolerance {tol}\')\n\n    def assertL2(self, first, second=None, tol=1e-5, msg=None, relative=False):\n        if second is None:\n            return self.wrap_assert(self.assertL2, first, tol=tol, msg=msg, relative=relative)\n        else:\n            # We first test using numpys build-in testing method to see if one coordinate deviates a great deal.\n            # This gives us better output, and we know that the coordinate wise difference is lower than the norm difference.\n            if not relative:\n                np.testing.assert_allclose(first, second, atol=tol)\n            
diff = self._shape_equal(first, second)\n            diff = ( ( np.asarray( diff.flatten() )**2).sum() )**.5\n\n            scale = (2/(np.linalg.norm(np.asarray(first).flat) + np.linalg.norm(np.asarray(second).flat)) ) if relative else 1\n            max_diff = diff*scale\n            if max_diff >= tol:\n                msg = "" if msg is None else msg\n                print(f"|first - second|_2 = {max_diff} > {tol} ")\n                # Deletage to numpy. Let numpy make nicer messages.\n                np.testing.assert_almost_equal(first, second) # This function does not take a msg parameter.\n                # Make sure to throw an error no matter what.\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=msg + f"Not equal within tolerance {tol}")\n\n    @classmethod\n    def _cache_file(cls):\n        return os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__ + ".pkl"\n\n    @classmethod\n    def _artifact_file_for_setUpClass(cls):\n        file = os.path.join(os.path.dirname(cls._cache_file()), ""+cls.__name__+"-setUpClass.json")\n        print("_artifact_file_for_setUpClass(cls): will return", file, "__class__", cls)\n        # cf = os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__\n        return file\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self.__class__._cache_file()), \'-\'.join(self.cache_id()) + ".json")\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self.__class__._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None:  # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self.__class__._cache_file()\n        if os.path.exists(cfile):\n            try:\n                with open(cfile, \'rb\') as f:\n                    data = pickle.load(f)\n                self.__class__._cache = data\n            except Exception as e:\n                print("Cache file did not exist:", cfile)\n                print(e)\n        else:\n            print("Warning! data file not found", cfile)\n\n    def _get_coverage_files(self):\n        key = (self.cache_id(), \'coverage\')\n        # CC = None\n        # if self._cache_contains(key):\n        return self._cache_get(key, []) # Anything wrong with the empty list?\n        # return CC\n\n    def _get_hints(self):\n        """\n            This code is run when the test is set up to generate the hints and store them in an artifact file. It may be beneficial to simple compute them beforehand\n            and store them in the local unitgrade pickle file. 
This code is therefore expected to superceede the alterative code later.\n        """\n        hints = []\n        # print("Getting hint")\n        key = (self.cache_id(), \'coverage\')\n        if self._cache_contains(key):\n            CC = self._cache_get(key)\n            # cl, m = self.cache_id()\n            # print("Getting hint using", CC)\n            # Insert newline to get better formatting.\n            # gprint(\n            #     f"\\n> An error occured during the test: {cl}.{m}. The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n            for file in CC:\n                rec = CC[file]\n                # gprint(f">   * {file}")\n                for l in rec:\n                    _, comments = CC[file][l]\n                    hint = get_hints(comments)\n\n                    if hint != None:\n                        hints.append((hint, file, l))\n\n        doc = self._testMethodDoc\n        # print("doc", doc)\n        if doc is not None:\n            hint = get_hints(self._testMethodDoc)\n            if hint is not None:\n                hints = [(hint, None, self.cache_id()[1])] + hints\n\n        return hints\n\n    def _feedErrorsToResult(self, result, errors):\n        """ Use this to show hints on test failure.\n        It feeds error to the result -- so if there are errors, they will crop up here\n        """\n        self._error_fed_during_run = errors.copy() # import to copy the error list.\n\n        # result._test._error_fed_during_run = errors.copy()\n\n        if not isinstance(result, UTextResult):\n            er = [e for e, v in errors if v != None]\n            # print("Errors are", errors)\n            if len(er) > 0:\n                hints = []\n                key = (self.cache_id(), \'coverage\')\n                if self._cache_contains(key):\n                    CC = self._cache_get(key)\n                    cl, m = self.cache_id()\n                    # Insert newline to get better formatting.\n                    gprint(f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n                    for file in CC:\n                        rec = CC[file]\n                        gprint(f">   * {file}")\n                        for l in rec:\n                            _, comments = CC[file][l]\n                            hint = get_hints(comments)\n\n                            if hint != None:\n                                hints.append((hint, file, l) )\n                            gprint(f">      - {l}")\n\n                er = er[0]\n\n                doc = er._testMethodDoc\n                # print("doc", doc)\n                if doc is not None:\n                    hint = get_hints(er._testMethodDoc)\n                    if hint is not None:\n                        hints = [(hint, None, self.cache_id()[1] )] + hints\n                if len(hints) > 0:\n                    # print(hints)\n                    for hint, file, method in hints:\n                        s = (f"\'{method.strip()}\'" if method is not None else "")\n                        if method is not None and file is not None:\n                            s += " in "\n                        try:\n                            s += (file.strip() if file is not None else "")\n                            gprint(">")\n                            gprint("> Hints (from " + s + ")")\n                            gprint(textwrap.indent(hint, ">   "))\n                        except Exception as e:\n                            print("Bad stuff in hints. ")\n                            print(hints)\n        # result._last_errors = errors\n        super()._feedErrorsToResult(result, errors)\n        b = 234\n\n    def startTestRun(self):\n        super().startTestRun()\n\nclass Required:\n    pass\n\nclass ParticipationTest(UTestCase,Required):\n    max_group_size = None\n    students_in_group = None\n    workload_assignment = {\'Question 1\': [1, 0, 0]}\n\n    def test_students(self):\n        pass\n\n    def test_workload(self):\n        pass\n\n# 817, 705\nclass NotebookTestCase(UTestCase):\n    notebook = None\n    _nb = None\n    @classmethod\n    def setUpClass(cls) -> None:\n        with Capturing():\n            cls._nb = importnb.Notebook.load(cls.notebook)\n\n    @property\n    def nb(self):\n        return self.__class__._nb\n # 870.\n\nimport hashlib\nimport io\nimport tokenize\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,\n                            show_tol_err=False, show_privisional=True, noprogress=None,\n                            generate_artifacts=True):\n    args = parser.parse_args()\n    if noprogress is None:\n        noprogress = args.noprogress\n\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,\n                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err,\n                                          generate_artifacts=generate_artifacts)\n\n\n    if question is None and show_privisional:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        # testcase_methods = list(testCaseClass.__dict__.keys())\n        ls = []\n        for C in testCaseClass.mro():\n            if issubclass(C, unittest.TestCase):\n                ls = list(C.__dict__.keys()) + ls\n        testcase_methods = ls\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    generate_artifacts=True, # Generate the artifact .json files. These are exclusively used by the dashboard.\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n    # print("Started: " + dt_string)\n    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += f" version {report.version}"\n    print(s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n        if not report.abbreviate_questions:\n            q_title_print = "Question %i: %s"%(n+1, qtitle)\n        else:\n            q_title_print = "q%i) %s" % (n + 1, qtitle)\n\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        # q_ = {} # Gather score in this class.\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n        UTextResult.number = n\n        UTextResult.nL = report.nL\n        UTextResult.unmute = unmute # Hacky as well.\n        UTextResult.setUpClass_time = q._cache.get(((q.__name__, \'setUpClass\'), \'time\'), 3) if hasattr(q, \'_cache\') and q._cache is not None else 3\n\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        details = {}\n        for s, msg in res.successes + res.failures + res.errors:\n            # from unittest.suite import _ErrorHolder\n          
  # from unittest import _Err\n            # if isinstance(s, _ErrorHolder)\n            if hasattr(s, \'_testMethodName\'):\n                key = (q.__name__, s._testMethodName)\n            else:\n                # In case s is an _ErrorHolder (unittest.suite)\n                key = (q.__name__, s.id())\n            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.\n\n            detail = {}\n            if (s,msg) in res.successes:\n                detail[\'status\'] = "pass"\n            elif (s,msg) in res.failures:\n                detail[\'status\'] = \'fail\'\n            elif (s,msg) in res.errors:\n                detail[\'status\'] = \'error\'\n            else:\n                raise Exception("Status not known.")\n\n            nice_title = s.title\n            detail = {**detail, **msg, \'nice_title\': nice_title}#[\'message\'] = msg\n            details[key] = detail\n\n        # q_[s._testMethodName] = ("pass", None)\n        # for (s,msg) in res.failures:\n        #     q_[s._testMethodName] = ("fail", msg)\n        # for (s,msg) in res.errors:\n        #     q_[s._testMethodName] = ("error", msg)\n        # res.successes[0]._get_outcome()\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': details, \'title\': qtitle, \'name\': q.__name__,\n                   }\n        q.obtained = obtained\n        q.possible = possible\n        # print(q._cache)\n        # print(q._covcache)\n        s1 = f" * q{n+1})   Total"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",\n           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\ndef python_code_str_id(python_code, strip_comments_and_docstring=True):\n    s = python_code\n\n    if strip_comments_and_docstring:\n        try:\n            s = remove_comments_and_docstrings(s)\n        except Exception as e:\n            print("--"*10)\n            print(python_code)\n            print(e)\n\n    s = "".join([c.strip() for c in s.split()])\n    hash_object = hashlib.blake2b(s.encode())\n    return hash_object.hexdigest()\n\n\ndef file_id(file, strip_comments_and_docstring=True):\n    with open(file, \'r\') as f:\n        # s = f.read()\n        return python_code_str_id(f.read())\n\n\ndef remove_comments_and_docstrings(source):\n    """\n    Returns \'source\' minus comments and docstrings.\n    """\n    io_obj = io.StringIO(source)\n    out = ""\n    prev_toktype = tokenize.INDENT\n    last_lineno = -1\n    last_col = 0\n    for tok in tokenize.generate_tokens(io_obj.readline):\n        token_type = tok[0]\n        token_string = tok[1]\n        start_line, start_col = tok[2]\n        end_line, end_col = tok[3]\n        ltext = tok[4]\n        # The following two conditionals preserve indentation.\n        # This is necessary because we\'re not using tokenize.untokenize()\n        # (because it spits out code with copious amounts of oddly-placed\n        # whitespace).\n        if start_line > last_lineno:\n            last_col = 0\n        if start_col > last_col:\n            out += (" " * (start_col - last_col))\n        # Remove comments:\n        if token_type == tokenize.COMMENT:\n            pass\n        # This series of conditionals removes docstrings:\n        elif token_type == tokenize.STRING:\n            if prev_toktype != tokenize.INDENT:\n        # This is likely a docstring; double-check we\'re not inside an operator:\n                if prev_toktype != tokenize.NEWLINE:\n                    # Note regarding NEWLINE vs NL: The tokenize module\n                    # differentiates between newlines that start a new statement\n                    # and newlines inside of operators such as parens, brackes,\n                    # and curly braces.  
Newlines inside of operators are\n                    # NEWLINE and newlines that start new code are NL.\n                    # Catch whole-module docstrings:\n                    if start_col > 0:\n                        # Unlabelled indentation means we\'re inside an operator\n                        out += token_string\n                    # Note regarding the INDENT token: The tokenize module does\n                    # not label indentation inside of an operator (parens,\n                    # brackets, and curly braces) as actual indentation.\n                    # For example:\n                    # def foo():\n                    #     "The spaces before this docstring are tokenize.INDENT"\n                    #     test = [\n                    #         "The spaces before this string do not get a token"\n                    #     ]\n        else:\n            out += token_string\n        prev_toktype = token_type\n        last_col = end_col\n        last_lineno = end_line\n    return out\n\nimport textwrap\nimport bz2\nimport pickle\nimport os\nimport zipfile\nimport io\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    f = m.__file__\n    if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'):\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        im = __import__(m.__name__.split(\'.\')[0])\n        if isinstance(im, list):\n            print("im is a list")\n            print(im)\n        # the __path__ attribute *may* be a string in some cases. I had to fix this.\n        print("path.:",  __import__(m.__name__.split(\'.\')[0]).__path__)\n        # top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__[0]\n        module_import = False\n\n    found_hashes = {}\n    # pycode = {}\n    resources[\'pycode\'] = {}\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)\n                    zip.write(fpath, v)\n                    if not fpath.endswith("_grade.py"): # Exclude grade files.\n                        with open(fpath, \'r\') as f:\n                            s = f.read()\n                        found_hashes[v] = python_code_str_id(s)\n                        resources[\'pycode\'][v] = s\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    resources[\'blake2b_file_hashes\'] = found_hashes\n    return resources, top_package\n\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_report_source_include(report):\n    sources = {}\n    # print("")\n    # if not args.autolab:\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            _, report_relative_location, module_import = report._import_base_relative()\n\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'report_module_specification\'] = module_import\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            print(f" * {m.__name__}")\n    return sources\n\ndef gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):\n    # n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n                                          show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab,\n                                          generate_artifacts=False,\n                                          )\n    print("")\n    sources = {}\n    if not args.autolab:\n        results[\'sources\'] = sources = gather_report_source_include(report)\n\n    token_plain = """\n# This file contains your results. Do not edit its content. Simply upload it as it is. 
"""\n\n    s_include = [token_plain]\n    known_hashes = []\n    cov_files = []\n    use_coverage = True\n    if report._config is not None:\n        known_hashes = report._config[\'blake2b_file_hashes\']\n        for Q, _ in report.questions:\n            use_coverage = use_coverage and isinstance(Q, UTestCase)\n            for key in Q._cache:\n                if len(key) >= 2 and key[1] == "coverage":\n                    for f in Q._cache[key]:\n                        cov_files.append(f)\n\n    for s in sources.values():\n        for f_rel, hash in s[\'blake2b_file_hashes\'].items():\n            if hash in known_hashes and f_rel not in cov_files and use_coverage:\n                print("Skipping", f_rel)\n            else:\n                if token_include_plaintext_source:\n                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)\n                    s_include.append("")\n                    s_include.append(s[\'pycode\'][f_rel])\n                    s_include.append("")\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = f"_v{report.version}" if report.version is not None else ""\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.normpath(os.path.join(output_dir, token))\n\n    save_token(results, "\\n".join(s_include), token)\n\n    if not args.autolab:\n        print("> Testing token file integrity...", sep="")\n        load_token(token)\n        print("Done!")\n        print(" ")\n        print("To get credit for your results, please upload the single unmodified file: ")\n        print(">", token)\n\n\ndef save_token(dictionary, plain_text, file_out):\n    if plain_text is None:\n        plain_text = ""\n    if len(plain_text) == 0:\n        plain_text = "Start token file"\n    plain_text = plain_text.strip()\n    b, b_hash = dict2picklestring(dictionary)\n    b_l1 = len(b)\n    b = "."+b+"."\n    b = "\\n".join( textwrap.wrap(b, 180))\n\n    out = [plain_text, token_sep, f"{b_hash} {b_l1}", token_sep, b]\n    with open(file_out, \'w\') as f:\n        f.write("\\n".join(out))\n\n\n\n\ndef source_instantiate(name, report1_source, payload):\n    # print("Executing sources", report1_source)\n    eval("exec")(report1_source, globals())\n    # print("Loaind gpayload..")\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    return report\n\n\n__version__ = "0.1.29.0"\n\nfrom cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo\nimport time\nimport numpy as np\nimport pickle\nimport os\n# from unitgrade.framework import dash\n\ndef mk_bad():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 100, \'x2\': 300}\n        pickle.dump(d, f)\n\ndef mk_ok():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 1, \'x2\': 2}\n        pickle.dump(d, f)\n\nclass Numpy(UTestCase):\n    z = 234\n\n    # def __getattr__(self, item):\n    #     print("hi there ", item)\n    #     return super().__getattr__(item)\n    #\n    # def __getattribute__(self, item):\n    #     print("oh hello sexy. 
", item)\n    #     return super().__getattribute__(item)\n\n    @classmethod_dashboard\n    def setUpClass(cls) -> None:\n        print("Dum di dai, I am running some setup code here.")\n        for i in range(10):\n            print("Hello world", i)\n        print("Set up.") # must be handled seperately.\n        # assert False\n\n    # @cache\n    # def make_primes(self, n):\n    #     return primes(n)\n\n    # def setUp(self) -> None:\n    #     print("We are doing the setup thing.")\n\n    def test_bad(self):\n        """\n        Hints:\n            * Remember to properly de-indent your code.\n            * Do more stuff which works.\n        """\n        # raise Exception("This ended poorly")\n        # print("Here we go")\n        # return\n        # self.assertEqual(1, 1)\n        with open(os.path.dirname(__file__)+"/db.pkl", \'rb\') as f:\n            d = pickle.load(f)\n        # print(d)\n        # assert False\n        # for i in range(10):\n        from tqdm import tqdm\n        for i in tqdm(range(100)):\n            # print("The current number is", i)\n            time.sleep(.01)\n        self.assertEqual(1, d[\'x1\'])\n        for b in range(10):\n            self.assertEqualC(add(3, b))\n\n\n    def test_weights(self):\n        """\n            Hints:\n            * Try harder!\n            * Check the chapter on linear regression.\n        """\n        n = 3\n        m = 2\n        np.random.seed(5)\n        # from numpy import asdfaskdfj\n        # X = np.random.randn(n, m)\n        # y = np.random.randn(n)\n        foo()\n        # assert 2 == 3\n        # raise Exception("Bad exit")\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertL2(linear_regression_weights(X, y), msg="the message")\n        self.assertEqual(1, 1)\n        # self.assertEqual(1,2)\n        return "THE RESULT OF THE TEST"\n\n\nclass AnotherTest(UTestCase):\n    def test_more(self):\n        self.assertEqual(2,2)\n\n    def test_even_more(self):\n        self.assertEqual(2,2)\n\nimport cs108\nclass Report2(Report):\n    title = "CS 101 Report 2"\n    questions = [\n        (Numpy, 10), (AnotherTest, 20)\n        ]\n    pack_imports = [cs108]'
+report1_payload = '8004954f040000000000007d94288c054e756d7079947d942868018c0a7365745570436c6173739486948c0474696d65948694473f3bf0000000000068018c08746573745f6261649486948c057469746c6594869468076801680786948c066173736572749486947d94284b004b034b014b044b024b054b034b064b044b074b054b084b064b094b074b0a4b084b0b4b094b0c7568016807869468058694473ff08790000000006801680786948c08636f7665726167659486947d948c1263733130382f686f6d65776f726b312e7079947d948c0e6465662061646428612c62293a20944b128ca12020202022222220476976656e2074776f206e756d626572732060616020616e642060626020746869732066756e6374696f6e2073686f756c642073696d706c792072657475726e2074686569722073756d3a0a202020203e2061646428612c6229203d20612b620a2020202048696e74733a0a20202020202020202a2052656d656d6265722062617369632061726974686d6574696373210a20202020222222948694737368018c0c746573745f7765696768747394869468098694681a6801681a8694680c86947d946801681a869468058694473f407400000000006801681a8694681286947d948c1263733130382f686f6d65776f726b312e7079947d94288c0b64656620666f6f28293a20944b168c162020202022222220436f6d6d656e742e2020202222229486948c0b6465662062617228293a20944b198c009486947573758c0b416e6f7468657254657374947d9428682d6803869468058694473f22700000000000682d8c09746573745f6d6f7265948694680c86947d94682d6831869468058694473f21200000000000682d8c0e746573745f6576656e5f6d6f7265948694680c86947d94682d6837869468058694473f1a700000000000758c06636f6e666967947d948c13626c616b6532625f66696c655f686173686573945d94288c806362363363336235383635306636313037643763663138646136303635666135373835666261626564643135316639653761633335313139323635623039393838623266653335373632303961333932616133656236633134636131316439646335393937343831633531373863313533393665656662313539653163373536948c803434656331613338643134373639626433653234323663386232366539303830356336313361386161653266333966663665633433363133666562363465303739373435323062306536353134353063303637623763633637636631366134313835653736346334383331373763333335303063626563626362336234646466948c803638306336353638323633623832303737313365616434306539323663643265363835336130613936353861386338343738393564363633643730643262343666616163333336396133636564366239623964303436346563316366656465326235306265376432626636313432313638383936663332306338353232313066946573752e'
 name="Report2"
 
 report = source_instantiate(name, report1_source, report1_payload)
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json
deleted file mode 100644
index e01b626d6395d114075df584e3b2822b1eda02d1..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json
+++ /dev/null
@@ -1 +0,0 @@
-{"state": "pass", "run_id": 863304, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json
deleted file mode 100644
index 710d65e381eb837b29dd244b6a14b8dc43e8fa75..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json
+++ /dev/null
@@ -1 +0,0 @@
-{"state": "pass", "run_id": 282722, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl
index 1a07e6f6b3f4993e4e5fe85ae58a067520f4eade..43a2319d3d9445b0cf1259dfc350ec87a82aecba 100644
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl and b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json
deleted file mode 100644
index a46f0a4507d0b006f7c9702b473ccfe098f07e4c..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json
+++ /dev/null
@@ -1 +0,0 @@
-{"run_id": 188727, "coverage_files_changed": null, "stdout": [[0, "Dum di dai, I am running some setup code here.\nHello world 0\nHello world 1\nHello world 2\nHello world 3\nHello world 4\nHello world 5\nHello world 6\nHello world 7\nHello world 8\nHello world 9\nSet up.\nDashboard> Evaluation completed."]], "state": "pass"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json
deleted file mode 100644
index 4ecb597ff913319a6b1de4e7d745cf63a6972bfd..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json
+++ /dev/null
@@ -1 +0,0 @@
-{"state": "fail", "run_id": 1789, "coverage_files_changed": null, "stdout": [[0, "\u001b[31m\r  0%|          | 0/100 [00:00<?, ?it/s]\u001b[37m"], [1, "\u001b[31m\r 10%|#         | 10/100 [00:00<00:00, 97.25it/s]\u001b[37m\u001b[31m\r 20%|##        | 20/100 [00:00<00:00, 97.27it/s]\u001b[37m"], [2, "\u001b[31m\r 30%|###       | 30/100 [00:00<00:00, 95.97it/s]\u001b[37m"], [3, "\u001b[31m\r 40%|####      | 40/100 [00:00<00:00, 95.72it/s]\u001b[37m\u001b[31m\r 50%|#####     | 50/100 [00:00<00:00, 93.34it/s]\u001b[37m"], [4, "\u001b[31m\r 60%|######    | 60/100 [00:00<00:00, 91.76it/s]\u001b[37m\u001b[31m\r 70%|#######   | 70/100 [00:00<00:00, 93.45it/s]\u001b[37m"], [5, "\u001b[31m\r 80%|########  | 80/100 [00:00<00:00, 94.95it/s]\u001b[37m\u001b[31m\r 90%|######### | 90/100 [00:00<00:00, 95.62it/s]\u001b[37m"], [6, "\u001b[31m\r100%|##########| 100/100 [00:01<00:00, 95.82it/s]\u001b[37m\u001b[31m\u001b[37m\u001b[31m\r100%|##########| 100/100 [00:01<00:00, 94.89it/s]\u001b[37m\u001b[31m\n\u001b[37m\u001b[92m>\n\u001b[92m> Hints (from 'test_bad')\n\u001b[92m>   * Remember to properly de-indent your code.\n>   * Do more stuff which works.\n\u001b[31mTraceback (most recent call last):\n  File \"/usr/lib/python3.10/unittest/case.py\", line 59, in testPartExecutor\n    yield\n  File \"/usr/lib/python3.10/unittest/case.py\", line 591, in run\n    self._callTestMethod(testMethod)\n  File \"/home/tuhe/Documents/unitgrade/src/unitgrade/framework.py\", line 534, in _callTestMethod\n    res = testMethod()\n  File \"/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor/cs108/report_devel.py\", line 67, in test_bad\n    self.assertEqual(1, d['x1'])\nAssertionError: 1 != 100\n\u001b[37mDashboard> Evaluation completed."]], "wz_stacktrace": "<div class=\"traceback\">\n  <h3>Traceback <em>(most recent call last)</em>:</h3>\n  <ul><li><div class=\"frame\" id=\"frame-140582372419264\">\n  <h4>File <cite class=\"filename\">\"/usr/lib/python3.10/unittest/case.py\"</cite>,\n      line <em class=\"line\">59</em>,\n      in <code class=\"function\">testPartExecutor</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">    </span>@contextlib.contextmanager</pre>\n<pre class=\"line before\"><span class=\"ws\">    </span>def testPartExecutor(self, test_case, isTest=False):</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>old_success = self.success</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self.success = True</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>try:</pre>\n<pre class=\"line current\"><span class=\"ws\">            </span>yield</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except KeyboardInterrupt:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>raise</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except SkipTest as e:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.success = False</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.skipped.append((test_case, str(e)))</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597696\">\n  <h4>File <cite class=\"filename\">\"/usr/lib/python3.10/unittest/case.py\"</cite>,\n      line <em class=\"line\">591</em>,\n      in <code class=\"function\">run</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">                </span>with outcome.testPartExecutor(self):</pre>\n<pre 
class=\"line before\"><span class=\"ws\">                    </span>self._callSetUp()</pre>\n<pre class=\"line before\"><span class=\"ws\">                </span>if outcome.success:</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>outcome.expecting_failure = expecting_failure</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>with outcome.testPartExecutor(self, isTest=True):</pre>\n<pre class=\"line current\"><span class=\"ws\">                        </span>self._callTestMethod(testMethod)</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>outcome.expecting_failure = False</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>with outcome.testPartExecutor(self):</pre>\n<pre class=\"line after\"><span class=\"ws\">                        </span>self._callTearDown()</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">                </span>self.doCleanups()</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597808\">\n  <h4>File <cite class=\"filename\">\"/home/tuhe/Documents/unitgrade/src/unitgrade/framework.py\"</cite>,\n      line <em class=\"line\">534</em>,\n      in <code class=\"function\">_callTestMethod</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span>self._ensure_cache_exists()  # Make sure cache is there.</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>if self._testMethodDoc is not None:</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>self._cache_put((self.cache_id(), &#39;title&#39;), self.shortDescriptionStandard())</pre>\n<pre class=\"line before\"><span class=\"ws\"></span> </pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self._cache2[(self.cache_id(), &#39;assert&#39;)] = {}</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>res = testMethod()</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>elapsed = time.time() - t</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._get_outcome()[ (self.cache_id(), &#34;return&#34;) ] = res</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._cache_put((self.cache_id(), &#34;time&#34;), elapsed)</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597920\">\n  <h4>File <cite class=\"filename\">\"/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor/cs108/report_devel.py\"</cite>,\n      line <em class=\"line\">67</em>,\n      in <code class=\"function\">test_bad</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span># for i in range(10):</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>from tqdm import tqdm</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>for i in tqdm(range(100)):</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span># print(&#34;The current number is&#34;, i)</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>time.sleep(.01)</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>self.assertEqual(1, d[&#39;x1&#39;])</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>for b in range(10):</pre>\n<pre class=\"line after\"><span class=\"ws\">            
</span>self.assertEqualC(add(3, b))</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">    </span>def test_weights(self):</pre></div>\n</div>\n</ul>\n  <blockquote>AssertionError: 1 != 100\n</blockquote>\n</div>\n"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json
deleted file mode 100644
index 6b4397cede5c24c76c1eba7d9f532ffa9b548b76..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json
+++ /dev/null
@@ -1 +0,0 @@
-{"state": "pass", "run_id": 766225, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl
index a8b4258ab07e08e10c14cb8ff117771a4e3d33e0..5ab625a286557ef448281e4a58d090bbd744fc8c 100644
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl and b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db
index eba28aab5e607cfee36521a00079738cc07361f5..a50dc2f94a42cb35bee1de7c32d1bd2eb5b48c86 100644
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db and b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm
deleted file mode 100644
index 43bd309ec02e932fe394e3429744ffee7e9391ef..0000000000000000000000000000000000000000
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm and /dev/null differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal
deleted file mode 100644
index 8d0e796d83831dcce78b83a22f959c52a9520a52..0000000000000000000000000000000000000000
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal and /dev/null differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl
index 0889adfe5ecf9f47930b690d68842f07548476b6..93e73b00832ec7e5b3272af33d44b4c9612033a1 100644
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl and b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl.lock b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json
deleted file mode 100644
index 098b70e81bd659d0cdec8bb46e200f794eb718d0..0000000000000000000000000000000000000000
--- a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json
+++ /dev/null
@@ -1 +0,0 @@
-{"encoding_scheme": " from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;", "questions": "/Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4ALJAW9dAEABDnncV35phHsyxOZ/WAdcsRcnyJl1OO/vI8mjmhFI6lWS7SfFyihoIfXWjVmlOSPIYt5RtCJvS/3j4bxa5pi+3PPpcnS2VzmHCG1Ro9va9QyFawpcqgSSGVWVFndTK1xzGnFnOEsQAgiJ0VB9ATsnpaY1K5Z1aravch16BLCzLWocn3K1egojbjfRrL6HkB7XP21nDmqPeoHjVZNY6QM2BV9RrnccWViu+u9PVaH/q5YyjX36FQhwsGiMGmIM/LaZzWCyCJt7bbYjq1UXgqmMRrvYwHAXCeoFH1McQxAWLW4P2GU1rZqLMKc/OoQjEvMZdHxWkQBqE2wS4++OffV1YnQ7I0xOljxcxIxhVlPVxUFyj/D04h0CF/ekMP1FxoZsff7QPOLT6apxSDa9jtn1P+u4E9eo30YIdFqZt6hmAmAOyVtNVJxeV+gOFhQOLe8suClVF+MoeXta30NnnqCdXgsjq0/69kvgmi1jackAAPVvsIGknWNrAAGLA8oFAACfhrnvscRn+wIAAAAABFla", "root_dir": "/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor", "relative_path": "cs108/report_devel.py", "modules": ["cs108", "report_devel"], "token_stub": "cs108/Report2_handin"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/report_devel.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/report_devel.json.lock
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/docs/README.jinja.md b/docs/README.jinja.md
index 6fdc56d16e2851a21dbcc77dab85d734d04d8da2..b2e21840e1d4158caea203bff5ca1f24d653c340 100644
--- a/docs/README.jinja.md
+++ b/docs/README.jinja.md
@@ -15,6 +15,7 @@ Unitgrade is an automatic report and exam evaluation framework that enables inst
     - Instructors can automatically verify the students solution using a Docker VM and run hidden tests
  - Automatic Moss anti-plagiarism detection
  - CMU Autolab integration (Experimental)
+ - A live dashboard which shows the outcome of the tests
 
 ### Install
 Simply use `pip`
@@ -30,6 +31,7 @@ The figure shows an overview of the workflow.
  - You write exercises and a suite of unittests. 
  - They are then compiled to a version of the exercises without solutions. 
  - The students solve the exercises using the tests and when they are happy, they run an automatically generated `_grade.py`-script to produce a `.token`-file with the number of points they obtain. This file is then uploaded for further verification/evaluation.
+ - The students can see their progress and review hints using the dashboard (see below)
 
 ### Videos
 Videos where I try to talk and code my way through the examples can be found on youtube:
@@ -114,6 +116,32 @@ This runs an identical set of tests and produces the file `Report1_handin_10_of_
  - You can easily use the framework to include output of functions. 
  - See below for how to validate the students results 
 
+
+### Viewing the results using the dashboard
+I recommend monitoring and running the tests from the IDE, as this allows you to use the debugger in conjunction with your tests. 
+However, unitgrade comes with a dashboard that allows students to see the outcome of individual tests 
+ and what is currently recorded in the `.token`-file. To start the dashboard, they should simply run the command
+```
+unitgrade
+```
+from a directory that contains a test (the directory will be searched recursively for test files). 
+ The command will start a small background service and open a webpage:
+
+![The dashboard](https://gitlab.compute.dtu.dk/tuhe/unitgrade/-/raw/master/docs/dashboard.png)
+
+Features supported in the current version:
+ - Shows which files need to be edited to solve the problem
+ - Collects hints given in the homework files and displays them for the relevant tests
+ - Fully responsive -- the UI, including the terminal, updates while a test is running, regardless of where you launch the test
+ - Allows students to re-run tests from the UI
+ - Shows the current test status and the results captured in the `.token`-file
+ - Tested on Windows/Linux 
+ - The frontend is pure JavaScript and the backend depends only on Python packages. 
+
+The frontend is automatically enabled the moment your test classes inherit from the `UTestCase`-class; no configuration files are required, and there are no known bugs. 
+
+Note that the frontend is currently not provided in the PyPI `unitgrade` package, but only through the GitLab repository (install with `git clone` followed by `pip install -e ./`) -- it seems ready, but I want to test it on macOS and a few more systems before publishing it. 
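+
+To give a sense of what the dashboard picks up, here is a minimal sketch of a test file built on the `UTestCase` class. The package name `cs101`, the class names and the point values are only illustrative, and the `Report` import path is assumed -- adapt everything to your own course package:
+
+```python
+# Minimal sketch of a dashboard-compatible test file (names are illustrative).
+from unitgrade import UTestCase, Report   # Report is assumed importable from unitgrade
+import cs101                              # hypothetical course package
+from cs101.homework1 import add           # the homework function under test
+
+class Week1(UTestCase):
+    def test_add(self):
+        """
+        Hints:
+            * Remember basic arithmetic.
+        """
+        # assertEqualC compares against a value cached by the instructor,
+        # so the expected result is not visible in the student sources.
+        self.assertEqualC(add(2, 2))
+
+class Report1(Report):
+    title = "CS 101 Report 1"
+    questions = [(Week1, 10)]   # test class and the points it is worth
+    pack_imports = [cs101]      # .py files to include in the .token upload
+```
+
+Running `unitgrade` from the directory containing such a file starts the dashboard, and the `Week1` tests, together with the hints from the docstrings, show up there without further configuration.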
+
 ## How safe is Unitgrade?
 There are three principal ways of cheating:
  - Break the framework and submit a `.token` file that 'lies' about the true number of points
diff --git a/docs/snips/0_homework1.py b/docs/snips/0_homework1.py
index 6722399a2047fe8d58fddc75202aace6f3db34dc..39004d78f9042ae6bddc4a8d632132f81a5e0e0d 100644
--- a/docs/snips/0_homework1.py
+++ b/docs/snips/0_homework1.py
@@ -1,4 +1,4 @@
-# example_moss/tmp/submissions/s1003/0_homework1.py
+# example_moss/tmp/submissions/s1002/0_homework1.py
 def reverse_list(mylist): #!f 
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
@@ -13,8 +13,8 @@ def reverse_list(mylist): #!f
 def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    sum2 = a + b
-    return sum2
+    sum = a + b
+    return sum
 
 if __name__ == "__main__":
     # Example usage:
diff --git a/docs/snips/deploy.txt b/docs/snips/deploy.txt
index bad9a4343f5acd27e7445ed927565e8bf0e23b84..9c9f5f667e004436d9a0da31dc8d1e8cc3b9160f 100644
--- a/docs/snips/deploy.txt
+++ b/docs/snips/deploy.txt
@@ -3,27 +3,71 @@
 | | | |_ __  _| |_| |  \/_ __ __ _  __| | ___ 
 | | | | '_ \| | __| | __| '__/ _` |/ _` |/ _ \
 | |_| | | | | | |_| |_\ \ | | (_| | (_| |  __/
- \___/|_| |_|_|\__|\____/_|  \__,_|\__,_|\___| v0.1.22, started: 15/06/2022 09:18:15
+ \___/|_| |_|_|\__|\____/_|  \__,_|\__,_|\___| v0.1.27, started: 16/09/2022 14:30:15
 
 CS 102 Report 2 
 Question 1: Week1                                                                                                       
- * q1.1) test_add...................................................................................................PASS
- * q1.2) test_reverse...............................................................................................PASS
+ * q1.1) test_add.................................................................................................FAILED
+ * q1.2) test_reverse.............................................................................................FAILED
  * q1.3) test_output_capture........................................................................................PASS
- * q1)   Total.................................................................................................... 10/10
+======================================================================
+FAIL: test_add (__main__.Week1)
+test_add
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<string>", line 882, in _callTestMethod
+  File "<string>", line 1699, in test_add
+  File "<string>", line 987, in assertEqualC
+  File "<string>", line 975, in wrap_assert
+AssertionError: 4 != 'Key 0 not found in cache; framework files missing. Please run deploy()'
+
+======================================================================
+FAIL: test_reverse (__main__.Week1)
+test_reverse
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<string>", line 882, in _callTestMethod
+  File "<string>", line 1703, in test_reverse
+  File "<string>", line 987, in assertEqualC
+  File "<string>", line 975, in wrap_assert
+AssertionError: [3, 2, 1] != 'Key 0 not found in cache; framework files missing. Please run deploy()'
+
+ * q1)   Total..................................................................................................... 3/10
  
 Question 2: The same problem as before with nicer titles                                                                
- * q2.1) Test the addition method add(a,b)..........................................................................PASS
- * q2.2) Checking if reverse_list([1, 2, 3]) = [3, 2, 1]............................................................PASS
- * q2)   Total...................................................................................................... 6/6
+ * q2.1) Test the addition method add(a,b)........................................................................FAILED
+ * q2.2) test_reverse.............................................................................................FAILED
+======================================================================
+FAIL: test_add (__main__.Week1Titles)
+Test the addition method add(a,b)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<string>", line 882, in _callTestMethod
+  File "<string>", line 1715, in test_add
+  File "<string>", line 987, in assertEqualC
+  File "<string>", line 975, in wrap_assert
+AssertionError: 4 != 'Key 0 not found in cache; framework files missing. Please run deploy()'
+
+======================================================================
+FAIL: test_reverse (__main__.Week1Titles)
+test_reverse
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<string>", line 882, in _callTestMethod
+  File "<string>", line 1723, in test_reverse
+  File "<string>", line 987, in assertEqualC
+  File "<string>", line 975, in wrap_assert
+AssertionError: [3, 2, 1] != 'Key 0 not found in cache; framework files missing. Please run deploy()'
+
+ * q2)   Total...................................................................................................... 0/6
  
-Total points at 09:18:16 (0 minutes, 0 seconds)....................................................................16/16
+Total points at 14:30:15 (0 minutes, 0 seconds).....................................................................3/16
 
 Including files in upload...
-path.: _NamespacePath(['C:\\Users\\tuhe\\Documents\\unitgrade_private\\examples\\example_framework\\instructor\\cs102', 'C:\\Users\\tuhe\\Documents\\unitgrade_private\\examples\\example_framework\\instructor\\cs102'])
+path.: _NamespacePath(['/home/tuhe/Documents/unitgrade_private/examples/example_framework/instructor/cs102', '/home/tuhe/Documents/unitgrade_private/examples/example_framework/instructor/cs102'])
  * cs102
 > Testing token file integrity...
 Done!
  
 To get credit for your results, please upload the single unmodified file: 
-> C:\Users\tuhe\Documents\unitgrade_private\examples\example_framework\instructor\cs102\Report2_handin_16_of_16.token
+> /home/tuhe/Documents/unitgrade_private/examples/example_framework/instructor/cs102/Report2_handin_3_of_16.token
diff --git a/docs/snips/deploy_autolab_a.py b/docs/snips/deploy_autolab_a.py
index 3abb33365597a48969d05b2e33cd72207ab28e23..7c490a7713fffb1e575f9c1cbb085ec4ee70b3e1 100644
--- a/docs/snips/deploy_autolab_a.py
+++ b/docs/snips/deploy_autolab_a.py
@@ -1,6 +1,6 @@
-# autolab_token_upload/deploy_autolab.py
+# autolab_example_py_upload/instructor/cs102_autolab/deploy_autolab.py
     # Step 1: Download and compile docker grading image. You only need to do this once.  
-    download_docker_images("./docker") # Download docker images from gitlab (only do this once.
-    dockerfile = f"./docker/docker_tango_python/Dockerfile"
-    autograde_image = 'tango_python_tue'
-    compile_docker_image(Dockerfile=dockerfile, tag=autograde_image)  # Compile docker image. 
\ No newline at end of file
+    download_docker_images("../docker") # Download docker images from gitlab (only do this once).
+    dockerfile = f"../docker/docker_tango_python/Dockerfile"
+    autograde_image = 'tango_python_tue2'  # Tag given to the image in case you have multiple images.
+    compile_docker_image(Dockerfile=dockerfile, tag=autograde_image, no_cache=False)  # Compile docker image. 
\ No newline at end of file
diff --git a/docs/snips/deploy_autolab_b.py b/docs/snips/deploy_autolab_b.py
index db9a49966a1a73959a69aac6751b1d6a9d500485..51c186d99ca0972cb9d59867c9c8983c46b91ba3 100644
--- a/docs/snips/deploy_autolab_b.py
+++ b/docs/snips/deploy_autolab_b.py
@@ -1,10 +1,14 @@
-# autolab_token_upload/deploy_autolab.py
+# autolab_example_py_upload/instructor/cs102_autolab/deploy_autolab.py
     # Step 2: Create the cs102.tar file from the grade scripts. 
-    instructor_base = f"../example_framework/instructor"
-    student_base = f"../example_framework/students"
-    output_tar = deploy_assignment("cs102",  # Autolab name of assignment (and name of .tar file)
+    instructor_base = f"."
+    student_base = f"../../students/cs102_autolab"
+
+    from report2_test import Report2
+    # INSTRUCTOR_GRADE_FILE =
+    output_tar = new_deploy_assignment("cs105h",  # Autolab name of assignment (and name of .tar file)
                                    INSTRUCTOR_BASE=instructor_base,
-                                   INSTRUCTOR_GRADE_FILE=f"{instructor_base}/cs102/report2_grade.py",
+                                   INSTRUCTOR_GRADE_FILE=f"{instructor_base}/report2_test_grade.py",
                                    STUDENT_BASE=student_base,
-                                   STUDENT_GRADE_FILE=f"{student_base}/cs102/report2_grade.py",
-                                   autograde_image_tag=autograde_image) 
\ No newline at end of file
+                                   STUDENT_GRADE_FILE=f"{instructor_base}/report2_test.py",
+                                   autograde_image_tag=autograde_image,
+                                   homework_file="homework1.py") 
\ No newline at end of file
diff --git a/docs/snips/homework1.py b/docs/snips/homework1.py
index 00d6c1f24e35cff32dccbe3995d8a8fd0a0f87be..54fe19e25b256c7cfaee3d59c8f6d833b2e8a984 100644
--- a/docs/snips/homework1.py
+++ b/docs/snips/homework1.py
@@ -1,4 +1,4 @@
-# example_simplest/instructor/cs101/homework1.py
+# autolab_example_py_upload/instructor/cs102_autolab/homework1.py
 def reverse_list(mylist): #!f 
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
@@ -9,9 +9,8 @@ def reverse_list(mylist): #!f
 def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    return a+b
+    return a+b*2
 
-if __name__ == "__main__":
-    # Example usage:
+if __name__ == "__main__": # Example usage:
     print(f"Your result of 2 + 2 = {add(2,2)}")
     print(f"Reversing a small list", reverse_list([2,3,5,7])) 
\ No newline at end of file
diff --git a/docs/snips/report1_all.py b/docs/snips/report1_all.py
index 67c1bf367647e31250d69eb0363c8e5db27da89e..2c9e51bb6ba8a087d40e4a57d442b37802388a41 100644
--- a/docs/snips/report1_all.py
+++ b/docs/snips/report1_all.py
@@ -18,4 +18,9 @@ class Report1(Report):
     pack_imports = [cs101]     # Include all .py files in this folder
 
 if __name__ == "__main__":
-    evaluate_report_student(Report1()) 
\ No newline at end of file
+    # from HtmlTestRunner import HTMLTestRunner
+    import HtmlTestRunner
+    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='example_dir'))
+
+
+    # evaluate_report_student(Report1()) 
\ No newline at end of file
diff --git a/docs/snips/report2.py b/docs/snips/report2.py
index e7aa0ed0811323a8cfe939ebc214aef6a1cae2e9..ab18c20d27ea9d9d1acfedf973f08bbac51ca60f 100644
--- a/docs/snips/report2.py
+++ b/docs/snips/report2.py
@@ -1,10 +1,16 @@
 # example_framework/instructor/cs102/report2.py
 from unitgrade import UTestCase, cache  
 
+
+
 class Week1(UTestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        a = 234
+
     def test_add(self):
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
 
-    def test_reverse(self):
-        self.assertEqualC(reverse_list([1, 2, 3])) 
\ No newline at end of file
+    # def test_reverse(self):
+    #     self.assertEqualC(reverse_list([1, 2, 3])) 
\ No newline at end of file
diff --git a/docs/snips/report2_b.py b/docs/snips/report2_b.py
index 5de6d0b254387e7a82b58906cedd82ccabd43215..cd07d0fd665b3b6b55cdd52c6a17052b83c12069 100644
--- a/docs/snips/report2_b.py
+++ b/docs/snips/report2_b.py
@@ -1,16 +1,16 @@
 # example_framework/instructor/cs102/report2.py
-class Week1Titles(UTestCase): 
-    """ The same problem as before with nicer titles """
-    def test_add(self):
-        """ Test the addition method add(a,b) """
-        self.assertEqualC(add(2,2))
-        print("output generated by test")
-        self.assertEqualC(add(-100, 5))
-        # self.assertEqual(2,3, msg="This test automatically fails.")
-
-    def test_reverse(self):
-        ls = [1, 2, 3]
-        reverse = reverse_list(ls)
-        self.assertEqualC(reverse)
-        # Although the title is set after the test potentially fails, it will *always* show correctly for the student.
-        self.title = f"Checking if reverse_list({ls}) = {reverse}"  # Programmatically set the title 
\ No newline at end of file
+# class Week1Titles(UTestCase): 
+#     """ The same problem as before with nicer titles """
+#     def test_add(self):
+#         """ Test the addition method add(a,b) """
+#         self.assertEqualC(add(2,2))
+#         print("output generated by test")
+#         self.assertEqualC(add(-100, 5))
+#         # self.assertEqual(2,3, msg="This test automatically fails.")
+#
+#     def test_reverse(self):
+#         ls = [1, 2, 3]
+#         reverse = reverse_list(ls)
+#         self.assertEqualC(reverse)
+#         # Although the title is set after the test potentially fails, it will *always* show correctly for the student.
+#         self.title = f"Checking if reverse_list({ls}) = {reverse}"  # Programmatically set the title 
\ No newline at end of file
diff --git a/docs/snips/report2_c.py b/docs/snips/report2_c.py
index aa444a6283536738901c1d496d0ccc32a9d6179a..65c06b3c4aa87b4efdec678235b8a8bcc5e05906 100644
--- a/docs/snips/report2_c.py
+++ b/docs/snips/report2_c.py
@@ -1,16 +1,16 @@
 # example_framework/instructor/cs102/report2.py
-class Question2(UTestCase): 
-    @cache
-    def my_reversal(self, ls):
-        # The '@cache' decorator ensures the function is not run on the *students* computer
-        # Instead the code is run on the teachers computer and the result is passed on with the
-        # other pre-computed results -- i.e. this function will run regardless of how the student happens to have
-        # implemented reverse_list.
-        return reverse_list(ls)
-
-    def test_reverse_tricky(self):
-        ls = (2,4,8)
-        ls2 = self.my_reversal(tuple(ls))                   # This will always produce the right result, [8, 4, 2]
-        print("The correct answer is supposed to be", ls2)  # Show students the correct answer
-        self.assertEqualC(reverse_list(ls))                 # This will actually test the students code.
-        return "Buy world!"                                 # This value will be stored in the .token file  
\ No newline at end of file
+# class Question2(UTestCase): 
+#     @cache
+#     def my_reversal(self, ls):
+#         # The '@cache' decorator ensures the function is not run on the *students* computer
+#         # Instead the code is run on the teachers computer and the result is passed on with the
+#         # other pre-computed results -- i.e. this function will run regardless of how the student happens to have
+#         # implemented reverse_list.
+#         return reverse_list(ls)
+#
+#     def test_reverse_tricky(self):
+#         ls = (2,4,8)
+#         ls2 = self.my_reversal(tuple(ls))                   # This will always produce the right result, [8, 4, 2]
+#         print("The correct answer is supposed to be", ls2)  # Show students the correct answer
+#         self.assertEqualC(reverse_list(ls))                 # This will actually test the students code.
+#         return "Buy world!"                                 # This value will be stored in the .token file  
\ No newline at end of file
diff --git a/docs/unitgrade_devel.bib b/docs/unitgrade_devel.bib
index 2de0c48889fe48c7693e60563702e52ce6547a47..b0682ad8d1d68a793bf5aa0fb29c2c9acb6f1280 100644
--- a/docs/unitgrade_devel.bib
+++ b/docs/unitgrade_devel.bib
@@ -1,7 +1,7 @@
 @online{unitgrade_devel,
-	title={Unitgrade-devel (0.1.39): \texttt{pip install unitgrade-devel}},
+	title={Unitgrade-devel (0.1.42): \texttt{pip install unitgrade-devel}},
 	url={https://lab.compute.dtu.dk/tuhe/unitgrade_private},
-	urldate = {2022-06-15}, 
+	urldate = {2022-09-16}, 
 	month={9},
 	publisher={Technical University of Denmark (DTU)},
 	author={Tue Herlau},
diff --git a/devel/example_devel/instructor/cache.db-shm b/examples/02631/instructor/week5/unitgrade_data/cache.db
similarity index 69%
rename from devel/example_devel/instructor/cache.db-shm
rename to examples/02631/instructor/week5/unitgrade_data/cache.db
index e7762b285bdaf5d8ee34b76d323fd18c34594666..4922ef2db92a2060c5a27c35bf00ec2ce8ec921c 100644
Binary files a/devel/example_devel/instructor/cache.db-shm and b/examples/02631/instructor/week5/unitgrade_data/cache.db differ
diff --git a/examples/example_framework/instructor/cs102/Report2_handin_3_of_16.token b/examples/example_framework/instructor/cs102/Report2_handin_3_of_16.token
new file mode 100644
index 0000000000000000000000000000000000000000..a233c643ed11aade0ea1942ed5c804fce045656f
--- /dev/null
+++ b/examples/example_framework/instructor/cs102/Report2_handin_3_of_16.token
@@ -0,0 +1,188 @@
+# This file contains your results. Do not edit its content. Simply upload it as it is.
+---------------------------------------------------------------------- ..ooO0Ooo.. ----------------------------------------------------------------------
+c3a65f79b5cf3f4dc9d011d60c14f19d064fc2ce5791e50ee371fde1eb3a641e873496c89e0f28e246b08a583b601223350fff3c09a36853ad603d454f604a49 33016
+---------------------------------------------------------------------- ..ooO0Ooo.. ----------------------------------------------------------------------
+./Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4J0VYHddAEABDm3nZErnBBwZI4TZV1/EtCatUi2vohbcuBBJ5KZ4eUO8sSrfysi8ohyh4bmEW5mQL3uXw4mlGPfCLwnvp0MPFPVQxb6fY4mZpRgZBRxlRIoM/bpQNtWpou9B6J7cnUsqhYmbUH+
+bnSRRvMMYKbc7TUF/OBE3206ICWcNMFwplM3Wy/jsHBp3gMOEXGzBiB/bYIF3PL/78KD8At/KY6LIfL48QgZNBSwqiRfI243njTrR3I0JzcMROqFgqbV72Vze0t63OziPvP1z0vhNciwCyuxfIJXS8054HmuabMDiq8F0mAlRbhgQhdoX1ou
+O6vrYg73c4KdKBUqKB6dSQVfqd/d56IO84203NgOa47PwwmjbZcam0Gj95ZEtfOONegSubWfYQF2BbNwXiN3Fzzhk3RrLnrYwy38Q+Pg7AelJRZ2EmLBY//G3F/mmP8awMcVE9FPPOCSlO7k5v9tDInCLOIO3ltDHN2C2UNbtXsWinhX55Jo
+Bz7a414g02fO8vGCKVOwfF5kHW7aPZiEJdIMMzc7mzRT85WVY0DYvM8Dh/xnunI3ZT84GNv6UUZPMaE9qjbM8Eoq/EMqxWFpWpGDB0z3iV5UyxFmoqcYhX+Cdd70do0Tm0ApCl7NXNECilKBTeZS2Z5zd5f+rmAAcNX/t+Wp/vfjExLUtuMQ
+Vx17q8b6YpKQ0hki9Ts+LyCpwBcDleiyc8S9JYL3oajT86MBDi9ZAw627v8N1Pm5d5srd6Gv/Rl+18wAGHOnkBZRUs5pvK7D9Ydy5QsYuZw4Q7uRXO0Gz6R0hzyDL1MgqvEzmkUdxJA1SfOavOpgEgTQnIUztmKicqvAGIHyPGaDbBRQVayM
+MMcCwcu4umoKe2GJdOCpGp6nPLrq0XbKTpgjbvk9NkvR4gftCYR24EmncPn3RO8yebK3LUfmzPpdywmjlXTdDKLC7pM5ureOKM61fPMltMPamX3VnhVNnwLNUjYuOVcVseIJ7k8k8KTMqYjvpDOoodEQ/fBLhK52A3uJWdBvDRk9K8PZ0oa+
+QCk/HURU8bkcJhcmycB2S9Z27gF4DNCXlm6m/fr1SYF9wg0b1v1y8YJoWH0YtlwAjVUMtZAkDtSRAL+2kteRCRdCu+yi7Zq1wto+bYwyTdo7uYr62cPrqbRLnCqy3CtQpowETpyeCaWZJp7fLXbLPGZ5sVuJ40YR0Bpc5eO1YKK98fOhG+V5
+yVsqPMtgSk+99MZeCEZ4Q3hdjqH6RabdioQKr0nsVLQMhSccTJQ1u3R3JAakGt//ymDcR3sRP21r1HkSEYROz85E78Qe1w71PL0/kBKuupsGQlCdyQljpwbo7UgdU3BFOaaHndHw6aG0NkInqzix82gV1JWg0OigXBGc0KO+ARWq2naeDziW
+wv0EE8pW7+rMc8TIzu/geyULO5NIN9hjY+RkB2G4eKab+sr5NFS6IPvrDng4hyYAE8PnWTdcXyVJ8RRdqIVUkpU+IxYRgq+AzoFvo834+YODJmm9YmZPRm+Iv3Bkxi5BxDelYTo0lwww4+O5PYBN2JQeR8+Giwj4VPOhT54j49Fvx6LC81PV
+an1lxnm7eCYVF/A8HYM7Tv6TDcB2j2rP9DeaBkYpLWuypGbnBlB9vTMLtFba4zoqYBYzvrrZzkvnupuvwTtiIi6oZ3vald6fSDi57QSxldIy6DHfv74gA+2SLoUDITLW4eBwUKyKMwzekKLz8G7LSxVthId0YbKBjvY4Xy/LRpvZXXyFwDXr
+7VePuYUjeZEO7c3Mim+p4r9OXPZ0uYeybRl3Hs2PZMJgChcOk78I08p01Xbd2vfQQsHPSEo+Ch5nOUHhIFUxiy9tQrqaKwb/qE9fXjZrQT4XA9F1uo6Vqy3v2yDO420La8SjOrnzBOJo3M2ydOVYJDOk9fIFFhmXP8v0B3P8QBZ0YR/r17WN
+Y4uxOiofX9tLuVHjS2JZZdl51IAtduXI66zTbJqi9HZqBHaCbSwm0GnP3jeaexmSkradVhQNmTaVB7CuqzJfXwydfo6b9UeTHE2ZS8q0q9a26pQXYPzl6o9p7tNNekHlhZ1Xg8fZx6nXesf9MgaxalpWBoGkbV27WTZOuLnooSdOfoj+lmgV
+ZRuk82Sc90vefNnxXAc9HZz3zXhm+ojPknlCuxjCr/1KPclD2E0FDkTqm2e3aJZuSbRTlGNB817qKVkeKV/h854xf9nzb656qJZCOdfNXvrtcw7eI3msm6KR+LAWbG11cAQChFt8MTMKG6IV0tISOarx/4hP0P8lQUXLNCHTn2F2zk6pa3oT
+eIOsF8gus9piTUXJ/c+Ck5Yecibhqs5ikMuf0XJRwkr08bET2QPsbED91/hWkztU3dTGhNMCFKi6crdtEQacKIpvNepNWpvHCEJsaw+0IkeXN4goQZfPQAicwPEZZjVHp4qOU/QUOKjCten1oJ3Xj9Qh7kKWL8MLMC7FI9vVHWood43YuMLe
+bqMSOTmTfqubhanYnFmoA9pwLLj2C1ZReLQSAq2A0MOZYO3BcDSDn+/J9lj2i/1faboVp4yIlHpqku6o63cbIsex42cBBFGglgEyOP5f/AuQR/Cw8BSlNAMe+cLqhwGCV9rd5cRQ8NHZDBo3pMSX2c3y6uCv72XHSxt/jc8KUNBuvtjaNqa2
+OfgYW46CqyQbC9b3vWVkXr92ehqdGJ7SgEF5NBCyxSevRSIK3BYWbgv0RnlQlg0YAXJJSN2bzsHFDg0fsY546mn9GPByjFqmOjWTRg2Tecl9bGq9nCY+be2ZXmfDEDVy90u4d8dlUS4kCueyW4RmxVQdID2X+ub5jqC7M7xfTB+uSEQKbtjH
+I9TIiLlKyJEP1N9qg3TF+/9VmdvLSvO1kxA6liRaevUoy54iQyjQ566kxJnulIY1ycF/8hZAUt9YPQ2zdg3d9VNoAcNWl6rqSrmQowKg/ETJB+Kuz/oiqbdWkYfopCwBzATQV8OA80cGiB3JuBofEzbrbot/1GV1A+39bQBgOA6MHjAkhlXm
+YHLYy324O5k9aD4iOqwoLO2QBF3vTYAQT7so5sbPPD4EKdGgMSNQ1X2tqbD/RBgAyWKiFMackkgIQYx1G+DtMk3to/J+5YOt0x1mfMzpfvJmPEL/Hkv94Vneq2FYiUDnsLJRlIwqePGMjSstye40c+sSW9AnCVWj415NohKbE2EY3Dbbh0Ia
+ccAUeD7I7PWtmy+zhqydZQ6aQIN3Y3KtTvTisjAkc7Hlq22EHxpB4irD22kQralBxxAGtkgDaKLGoYhIZqNkT/5YvO1FlLoXGCrf9BXCSOdM8zjKKPVNlmWJgf5zX+tSKHXjl9jCE7UIqA68S+yFxj1YMkYncdF9zj5/7morsp8wY4ogzW43
+YdERKrIjVG8qoAeeiYCpY6wUEQa6aTgScebcQJlE01KxLu+uHhhY5H7qo0Sj7hFffUCLvpi2fbQ/GUej6rY4MJx1ZP7ymXuvuJeH/lPjmlXKTPaMfPypSmEZuoc5QlVTtMrivkOH82OOpnXAfR1NgwpV3JjVToWun/kTbYMZqdscCk7M/6f2
+fzH3fwQ0UXQqQD/swdOVZWO1D7V4fnnYc61qpDuQF2LG63+9viTx25QV9R/OKYnV612lyRk19fXAJQeI04NRb4pHnf2uADCQiQlXkL5Dkf80cvNXNWmNTUlknYu4gnbtkU+Z5mhh2XPCTXwjvYWLmLdseeWA2tlxRFnOci1WJM6Z5lyJgxjr
+s42YgwLJPDhLT1+eOXkxvrFGYhI/4kOZzWL81R0/hnOVjbqECf8uZPfg+MQpJGZtw3Cp+Dtg4ad4HoCTC/FYpXZRY7XjLAFBzgTSmLEpqhe32y7tibRcBOwSBByuop1BM53saMF/xD53zT1f0wXG6S8hW3j23M3MPLLbXDB481iB1nVOU7yD
+v7RqLpZ0g2D3tNjxTYCq4Pgth1cL3VpqbQzbYUEmj5YQG8wDRvpULlFHDeeYY2wz2vFtPCqIMElHCdiGRFoXElmET7Qh0MNFP+dN2w9hEn7tvXK/z6RZvK1M+7MPUJGZCCFt7GUnk2zkUQGz0S0MXXUhsH8Osle0o27zrhx+Az6oWY4QTfCw
+S+jPR2JNk3RtbXiJnTLHQq1/Ev7XUvtBXrusLMFVGmZF57vqaKjNySaWbXU+ZhIHDxAnacpklpzQSg1trmSwQGLjd/QzMqFShlFyvqOA9UCG5OIwPysLhkNjK+kg5SEirPDr4xQQFbrMDx2Vsg9Ni78knBonEgUZedpb1Yr32wULI23w2LEw
+0kQaihQi0BV2CZUuTR0eGrSK4NMGravyT+TRJiqNbs7CHbz4GJpzSv8Cex3ivQgb+Bk9s6wabstn00SycC+zfVxKEU/KVnlHZ87OGFnP9+eoS2GJPSAAX6tJHI+4MtqPYBPNd5MpgmYRQZduKdiyM1+FCYn2TD9MGmWb37Msu/M69Pqdh2Cf
+Miiv5hoG44bT82wRBgalZef9yGx19koRqzVBYh6SF3GiBsy9TjZG8hRyUwnqGB178WkbN4AYl8e5RYZWi/VELq2O0GWXiF/a9N8/L+SHVmPqWzW4OcN4jWzcQDu2iPLNbc3kOO6xnkFo3rlXTXvmbRaJg3mYBJGKOxnPkcPlZUcnd+1q+n0q
+vVHucs8SNlfcem2UHfzJE0rDH8bfeB3xhupSe7Gk3Gz9nlmGKsSQquoNdvPtwsXn0xQXyjSEbKnodif2Q8GLGaf1C8+jNnpFM0jvQCEp+idu4XvZCx+A9CnDuPZI3npAe5VmGUgfuLzTRslwKMnSQ8WbHPKz8Z5Gtq1RP0q0nPPLxV5/c5H+
+B/LtXV9A/fdX6GmxdAwGDFVwuwYzoyQHfDgtITWt0pymToEgIzkIG59qIp8xKu2vR42f0gPH5uebLKZQY3qXPCG2T5dw8aqNuzuffYlnW6Y0R1++nF9uslfjG3T9XIojsn04N//O/bfdc9AvEzOnaV1SoofcmX+c+PjAA28wVh/rgxHBrYCp
+5c/eJQF64YF7uutIjkcrxJ7phcWae84hQgYFshtkbMiTG4uQyrZskvTG45FENFXlshZdWfqHMHGJCe0VuMB1afEVajkRsfnhDy39Mcnwq9JbbQafZBHhjtoYr8YOi506L7DprhJ6wLFiKLh1y1f+rysGAj20v1hWwC2SJ7eW+eKS8vlLlsq/
+OrbVSUB9XFRgs8WsOtcz4QVX+Iyyt5XusgY+kATDJIxCVwzGOWmCG2zHyYhbsHyNoyBZRxspy5KPoOnvF66fA/AqAayK+c+tZq2nOQnKLeaLKVFCLOwIjUYl9njQXCl3Um28Mjhw/jcKEydRMFtdZFqpEGRGHzkUBn2vhLNv0lFCOO7mMf5k
+a8tFqV1Ya/OLGIm/gfjTTNXw7ERw6T36F74/609ut6gpZgKQUa0wHXsUnI4pytfC4sEzQo1olvCKIA0h9odVVJQjghI1AfETpsfGjKKOdipViKvFcdBUP6/gcZxQ/+ejW6AGjNqDV82eo2MaPgeNpl5xuFHY7FiC8acrmqICKpIDR3HBlo/l
+mcye/be+hs+1XjxY0BbD2Jcd9XsN9OaEfDrPCuGoAgiFCQUwZkbn9A3nHfn2NEfTbySEQEuhdF43Lm83/xXkikYwreN8a0JaYb+YxhVnMad2Wrd19s33OH/iixeyNnBDxHiT/WoH1lUBjcMv8YV/Dwc4KBzVgPFB7iURdI4nGS+hL9Wrihyv
+MleN6BUQvzANU9ryrgoI5odXndOU4StzQFZpaTLT1s10zYfRv1B9Zy2klHN7eJyGK3EWqFaLIQnUGW7XRu0Ygf+B0jcolwdJ/nVKhyCj3vddNRsa4BlV05cp5VW/vU7ztEGQ56iwVYwr1p+PgCp8CQtJjOj0SfB/zF9FlBj7z3ogSuddFmXq
+p4hUz8EJLMNOrXiBIrCghTsxKAf1uasr5h2LeRM0YwkTRxbWNu6cIt2NAiguA3Lk3CI/NhEsQoTZKQeocgtPiIClkKalTl0I7OV4hMpa7XYMpIyhD2RU2nAEhpQqE3g4ejV+6WLLBHzohDu8tpa64ZepscWsSQQQBoFIsingpCh1Rwc2pAUn
+kIfKJk8T1BDXHX8nBfRHSiAQ09qf7rHn8SmTSBhbbZHu1xQFBZpTZGpx9sUPKNqEdt1cvh/1Z1pGem7yf3uYbihI7yGLq2oLJek9A34zlrfewb/a0COw6XBDAJz9aITjx32eMO/aQFm5tfbw++ApL6EuZc+LkfYMMAjNg6N474+lrCpY+W53
+T509bLlvz4P7AxQtiQSHQx/bSbYMHMmUKqhEwFiLWklipgs2Q73mlnIfZh2DVtrMtyDJQbP30bHKfuI/V9P8JJxGatMI7UXwtVFIGXjhsPYj4kSc/HbnZUSXfzm7hANFTGaDURpGOxX/zEu5LSAmY7+kYei5cS2SsCoxIryBbYWk9Wnwr4+a
+gKK5Y1I/iUON4VhiTFToWCPScaGF1xbWTlQeiyYwCu2Q3COUMcpQvYWEKnsxjyAlr1t7kXyGlipkUt2MjeLisJ45+ppHH54kV1NOUanCjgYPCO+IesEUCWNNHykRnxQIDmTyhY1ZiZateE0hw9xVUKv7uhNGryswWUJc6+A+mvhR5rDweHex
+kS+GGkq+mH/0LRN33NtzwvH12COdrGcQyfwSJjx33oDsgw7oY5wtBeLM8nqALIJNrI5D7LH4YjApkB3I9Wzsb6nLAIHDkS4F5Uzoqpimz/UPc/jVC5OwdFupxPDOZYlg7SdY0QQ9Imm7jbGRm5tgfkAwWP4imAm8UtUlPmYDs90yohSjUxJd
+TScm5geI8f8Cyo10FV6sdgJbbdeD0iTMh4b2A5QVS8PcZGAkZMDRVpwykCPem/HGDKdAk3wNTbTYz4IO0VlSpgObPedlDaZg/k2uZ246Bt9v4Y/nx1HSeVRSd8GXC3kBRO8xyPbCH0CPZJKNK3bTHrDGgEmh6p2zXFohD8tljo2RIe0JVxbD
+hL9UcdqJhd2crp+DZGZWWOwl4IADUdOOcSY2egfiFm5inrCKMStDITrN37BXIyf3JxWR2quBma/aedWo96k5OnQa1Ifyz4foOcUqxTMRERj24WLEE5eQiCObwUzlF88G+zEkDjnZ9fry0VJFtjkiIYO0U0MGDW7GbvwWJOuunVkQczWAGxg3
+VQH26l7wqpHzx1muTITrQxoSCmyJzPFUNQ6tT1QvmgwQxpjBGrRWUIHZRixnKGOvzDLIDRDGySfnZ4lMYjpNjDT2IVCmXgZhXyLUsMkmRJR/BhXg36HfPNIpmpoShBf4v9DHjqqOszKZ/Jip6DmTpC69ISs0l75W7ser5vhaKVt8CrPtanLm
+x5gy73J+mvK5UqwZjylwOjGeKGpHStdZ/weKF+5Q3Sh6FAQ7ce3tHU1prIJZgesXYkJfTL2FXD1RHw+mIFzlOncRf5X7HS2vXLiRmpyT5Th2KF6+IvrZMBgj6DNu0DjbeuiIuUhaBOh0RADvK+0Zg674MmgPynM1Hu9tn+bFEGogpFgCF7C0
+06HPnbOaNE6uUl/wso804Gjr+9LQsTXaiUKENPJp5qPGG224GJp7nobwozn/wPXtY9Plgl9e57cv3gnTThtjj7KftH5ZcFvwO3DQNxF8tQoJ4DrQt2yzf2EAUmravEJmCkSt2U11ydamHFH+AIX9Sfcfvu8WHySmB2ulKmLeIMPel8wFsWc7
+AFxvL6ih/EcBtjiifb+E6eqLARjWpI3ikTS5smKhqM+iIrxaqRovV65M+98UHhMmKwtRJZfOturVHiwLfzVM9y/vURFvKTMpjWBMW/OGINE1vNNFUzcMu8mdbf67j3PxGcV4zfwdfWwwZnWTSoESYxHkAMKWZaCi4uWdWhXWl6b67qcSMI3f
+rY0ZIL/UcdYRMhtiPMuvFbh4FSqn97aNDJa1OPhux8qzZX30Aqm9McxzkS0QyphoXFM56L+LSbqQAeCojXtYQRSco3qVixL2fjMPBpZURrs3eJWSZbol5zY61ZjbZeLxYn4BEFGLHgIX9ZDCfKOcWwQDdYRsNAAX1BdOgJxrxvDtk/eyJRt9
+NDLhqE7BiSYb55MHf8Bt3msZmO4GfgFUVb2VD98XTis//RNYaZnXgxqcYU0xNCoqiVrOBpiUB4J0sWRd7XfqU9G/p3+b/b2UdT/LLFXNiZZ8qhhOq8kRP+a+9HvW7008jrhy3kCuUq7OvQTH9xJmjarpA1RNtixjchGp2IOkMGHZWZquUREp
+AnDeoB1HEHGSlOE29AY0t1GU/dL6UEVdgXdWDdRV98OGcCqUwnUuMnVAdPxk+0AMhG3KIc9MDFRiEp5xIxobW9OhREGqzh33hZr0VGhBWSpnddwP8d3agPdMqTUkDzF3/XMrFPNmLNkb3Bges607zIqVirjTAxXXj3LLzM47B69ySrGmsCtP
+Q19Q9eFproyHC0dwGJslTG1wpATP9ANvJUsBQkyXEQEzGNNtfIp1VVaTlshtmsX4FL7ZH1M+MjK4BWoE4JVKpuWPHl3QNze+CUqH0HJcFucHn+zlReSgbAHzzaejFkrm/OR/MK35ddYHZDQTCBc67xvhw8bKDWeN8kXDtDV268G5lNPa7kuh
+hE5G7LnnATkJM/V3k3OUwxj3mmuT+aV/yDOpczy1tcUZkkLNU9/EuW95KDw5hbGpI1O+ucW3tDcc5IPzTeBUe/nbBgyshgpZmFcTDQgdMqekR8I6TFdQzF9tYovURJzJTREX8tWBkXOE0/jWTjDGjKhkuahZKydcw3h0DHwfL/2fFYp6F2gD
+SxykP9UzeAEdGFwwQEkZp9E+6ORIKgYHj3cXrHyLnhYgQtYKIqp6cZoSVFi18vAadsWm55mzLKg2B64o8yJygM8yUtJUvhVfrvDr4lG8Oa9zhQ8LiAnEqlNZm5lSB0RHWJAOLNzgVv9a33/uZan5lIp8uq0JAtBa7SF+Sk1rgeNFfB9r4rTv
+A896TMpH8ZytvQxEfrOow3fB3eoVcW0SI7PMQWSRa3Nt6s0lX0zwHU/PDoyrzkT99ZQstb+6VCW+/zkfli2+b9i4S6R0r+pho4j6d6q2dIzkjrgDeLRZ1J/AgRM4to98YPtxfgvGH+gzOVcC0NmYXLLW7zuAaWh0KtMwCOzsXiStv8Us1Aad
+rUrghP19zOnYnsAtRtlWedV/0v7glwlvadDTFe2IOIU2iF5BiqlouSWI+1k2qT+rplSG0cw+bWgXBXfETtbXPUei6S3y+6HNcgTvJ8EiYjXDzW6GpFNgT3b74jreBqL/TKIyA/L3chTKGWrUmjQ+RzjybIUWNPUd7cew424EuQFpBxGpgYTW
+dqNWVb11Bog3qsBzRq/Ib/FcGcFr324Vyu/Lv/fHLPSJfRyR8Zuta7Rtza4P43c9jrKq28hav4t8pLlOrwAQuHwEapsifmi3mtHY4gnHtk4DjN/Kg+ebNj6bUEtYjPL5aNOiud9wUPlKk6PDeiSTBg4ToqBvlnuh7jENYRIZYZ6qELtClQnb
+YnXrXDbwJErWdrxBEncRfXCeo4Zh9m8FEinmDms4eRZzyBzrti92SxhrNHI836gBy6eu1zPUYOkjG5VMjMOeikvoUy1poV+0P66iehBr9H3X6Pp8sRRDv8U6WAG4vRYC+MKNPdK5t9FrltpajKpQeESVjoD8SSu4/6g+ROEq0rKaP4e0wvB6
+Eger8ZF9hZVraRYRPYT3vkubn+Ipiyz08zHxcRbpu5Mk4twzmB/iMpsaOUwkY+gvHKQTodjFfTiLEwqY+7S5c1in2qna3I0BPvfkzkJJ74L/+AcAc5Ne0OvbCqwgV+4IkaX1+PcU0XKMTmZb+V7Lbqtt21Rss4Elotm/n9tZUA4ysTVht+Gz
+yd8yQzaXwlNiQyPhvsMDVnRbM8J+tneUAWgLlHO3zLvALrSPdIX9PZCj6mO2Qc4C+8EmlsgsCMyXjAEgqMsBwiEdtM4WMy35ZpzSChxLguonSS4rpuENq31KxtvUfSPdOqqZh4c9xn2uwS8ZyvTvTiiCa2YXGxkGfVjKLUzso/d3B2Q74Rl/
+4aMHc+Xu0e3P9tcW9wk6cYKgjX9/zcduEMYAkJDZHUsv8H3TJUfcAojw2IUTAsbHg+faZUrH67BznsY0m3UsxjLGD+3hkq39IDaDNmDcLy64LVnFnmsublmVNEhRUJOIlLGihGf6gtwOiHV01o5SYgCTcCGFM3zJbvOKccbwWIEPScpj7bWY
+Wmq3pI5a06+SvT+SEQsrATJ/NWGdB1HXbb5amb/AP2lRqAAJ/8Zkw01/7Nnixje8Z4noXvQh/dXZVJw9I6rl5aEr8baHLHOjmR6takhVtIH2TwnVnjtSNa9F6GJpqX6/e1CqGM5nYjtP1XYXqFw7KqDFs/MyaWwiZJdFQ/yerpKbQsoDciAf
+jYPaTyXmnfkjSlQRqmQre6Qd2lTlfGIgsGI0IjTLnBQQZBj4N8tjzPH6q7gXxJmXTXv/sUnuQtoPrxWKLFIL+ZT4rVY9d8RlOf6j/4NOy3YMW4GeFXEvOURkAbUCP9WKDhx5IqRnA8Rtv9h+0XdtP96r1ZHTfnD9JY0YvGsdk1E0tBkXohYG
+eT70OBwfdEDqNAx8O+HOBocvax1l1edRNImcxhXS+E77FJ9zGykK1yq4E3dBdRLCs1BpIsG10sHwKXMTZUx2rqpdTrybD7YHDSdX/bnazhxJngX746e//HCqABOphoykVrtw/UpeDLWZ8KQrWZ2Ky3GGJrwaIYQDisTBecfxEQl+RW5q9T6m
+ZalPMRgM7rHGTWsDZA4ewqxNuLyudS3iHmmRygwQ345onEFpXobvvKuuWgGGkFyzIMeAz7UlWy1XdMCUioHk3ThwP8/MZg/J50zIaNYMkC+IXuiZ+aC4WT45WEuxhiAPueIMAZEXzJoiL3rgoPGj7Fsx34tynppvxtPHjeUaD0fuXCZsRrVk
+JMrfW3lJio1CJ6+Doz/C+Ujcq6eoBzK2BflrYBuG/UQ5TJJ/62BkYcnJyxZQ7Nxw5OeGI7uCsMORTfrWSORL+iv+CfblXlZutlQZuQce65gjlGuqcRts0RZRdjXdy3bHuYCAeV2EsGJvVgjbdwRWPMKZIXSuE060eRzZHpGkg0itNHisCHX9
+JjufgVw9gF0idyiD+T+HUSSGdKmer9JUWx+RFsCKC9yV2FvuMRxyY9wh06FQm9su9rUBa91p5JplWipV/22YdiWSZdDhBx5o5P4RB6JxXIbab9xuTJT5s1UC1bXNCt3fiddHzuFJTwXcMM+X390ar5ZItMvfhGtgW3twey8tlqLyhM5P7W7t
+qYA0nBgFK7LwGyLIKDlmgO9xng5wqXuopAV1rbHQHPfFwP4oasTIxn1PKgEroAw5sDoChy4AEZQfZsVuFF8NqUR6sROQpQF+N1FeXajqNQo3lDXe5rFCT+tztyN2bZ/PnMcEaNvGnLkgWhTpPTzddj5nMZuJiRKl2AYXUjj5XN80UOSjFOSu
+KwrZsAIPosTeNxPbCNS8BLA6KZMeevSme9iPm5IXvv3jD8nqqaICikz/9j3+JXFCfpY378jt4ErjO2nW75+AY5HXH7mThweccE1KUq8d1ayCLkVcjhHd0Hci/BhGnXoFOr9RtaZXCOCv2xnd7zZlBp3nT/ukFbwrKDGQqg2g0Z4Eckga0k0W
+gLdX0BX3J+Pqvk4ek953ogt18cMdVhe/8GsDeo2GqUdTbz+SJuB7F5pbAioX/s6tcLg/m78EeXwu7hmG00t9uvtwkLcBC0G2ZJtud5T931lJ8WfXZTQMp5PZuee7W/3KoAbzI1XTYK4FqiS8+lHyVcZ91rLf4OsJd1CvY/URNvd67uHLI2TP
+vKx4QmRkDkQgs86p9QeqPPsEwLRl3DEoYXih+Wye/t9yRWbayN1OzICuYwyDNvpgxTiBXBuZnFaGBC/4WoYw/8tnchDe2PcPYnzGCXzm3gNtyDzDpY7nLX1BI8YquyQb+hsFCmtl8GiB+8VhFqy7z4M6G6fTSWEJNcyu7QTPF7UHp0iYZCrj
+mYKOg6rKVZ3d9fadcmxv36m82lD+eFhshHhX0NgGqBmOqW0ajigjxqojAhJx/tj3nQh6xUEAFkYLWNfq+tEFABEwxQtnt6xC6s4nfbOCg4vo4lhf670DXiPDDzL7GUcj8K+FFeSNckvAOeMXNG/gN6EjwQD7x8Thtt3vaTeK/cZvE2tMkiEx
+73ziGCczlaMTgAgEOkZZeVERXXnk4ljai7r2Vs4eSpXl3nn7XHKgyBmiYE83zktnCUfPohZ7G26ES7PnQ0qWRuNWyV0k+aK4TBDbTqdn/6mX1LfeZRJLra51J7fz/wJsLmYvm4/1PEW4SHlcvTll4oPzmnYEqef9lWB+AH3z0/Em9seHYfki
+5IYRIEhOX8U5Dk7NXVDmVXpSgB+7yB7oBAokhrU8cXFW+huELAZGUAgccq60A6iD6yp8q1MSOvjPuijIRTXbm5zpSam7v4cM6IO9rzqQriFxGJDalmfjd3bP7fDjhRRyYgUbNikqxY7iUyfhNRtVUlDxWdvqWICHLIjZYM8zXF0KJ1yp92YT
+TheUhbk1ALCPMjdmF2oVCaIrAIx+YPO4gLG4EAcvJ4NE2JEc7RCEL+4Gz2+964TAYTQ+UBB3JSA+s3/UNDCHC4H8vlqEes7lR5eMSXw+Q0OogEsY+ygfABgrl2EkJKNJHr969n3C+DXpgFuua4zH0ubsXi1JYfHtMAFeJTL2vuNeUBsoih7H
+boiwFGN6K7fzzfOV4buC9RdYLf9wXV0e2aRIaEoEZGjqZ6iWX5nUhWMW8w7YfMoSur65MJDux2grlFDSvAWbOM9ppneMoITj+mVkEQRoOzOSV6Wp6fm7eZbYFsix7lYlXL/0DvI0pYk15iDXAOf0vccDkQDO62iJWafQaVc20nLJGCuYVYJG
+yMsT5F91jlIiMQbZr4xiQJKtJpVituM5L7UJVfvTzMsfkeSRBcU7kMRX4uZ8Gk/ogB9ljAfoOB0TTY5FEHbi20JeguddsfO91ZeX84R2O800Ithe1/3IQwr74vS0SthKmGgh6vKTbSAcUU7Y2WPa07Bpp/SMkh2rRl63OFIK/07IaC/IsCpU
+BDfKoaG7F+VbqLBR/Qh2VO6zFG1dlFO/z/HMUYIaB8Edro4LWl3iGQ6OdrYwNH3vbN34KTGjUM7pVCTRclErQbnzlOhNQCeM8bDpDtDPYE9S5qwVWV69bCMFLIBhqZJ9+gr9Gy/wk2/yZJcJm8KgTwIvLBCUZhgv3Oj7wuUgigIjMy2GX7Yr
+P2XXbcmzQGRTj6eF2SPuN89Rd4ULdGjKrp7JN+x6gsss7odOTOigWrh8IKhBgE/NsJ6ir52oRsNadsKyye11PUOutpvHdAmzlaAQxROXWBvpTADV0auhQcHDKtISGtPgQ4s7C+B/eZH2yPEvNKKzE5hcaGex6vvUw8ccJngt81MvvvhnDSwv
+uB+W/w8YiDmIWL14ujlgoM6VahOKSm5+5mOQ/R5EXussD1PZuEtGyJkbkvB9M0SmpFRpodzVx7N0Sk/VrHvkEknScEw0Ns7c6FlPRLqXSGvlRk1NuEwq0XpsUKcecjTvd7tQwAKKDVBtQ4lNskN+HXaWEulKkdNDxc9NS5FP7DWGCUR7C7nt
+CbKHak17gtDR9PNi0bPLZsrO8BVMIKD4wCGd02pvB2Hg/7lGY9KPmUO3N5ePBMemhsojojvEh4p/OfNptT9lBhafIUgqIW8HwOUx2ocviiZCjjCV9cE2cR4M/0yRt2otR1e2g5ftNwaV0lAbLAXRrA9YvGvIsYxblkayJ9FfDiLfuNi9UAqG
+gvauP63Xx2xr+3083GyWKEFRQlCGFu9AdKiWu2dvJ5882TyMPMqoEs6MFuUa6JUbuJMm7txil1biWItLSNOiTJJNPpI7H6M1RrrYCxevN1ZsYY3fpxv1qLtbEYyfWr2iW1DKpghj0XFbEY7Ea8x5b4+9TlY+yQhAiEXn+n6OhrppiJJqEs2b
+maitFBC1CqRcWZn1y2bmGePlqexBL2J5bAhl2Fw6lZU/sVMRJcFJZZl0fqToB2hzBAgZTNwZLaCssnm5zMCKqbk3F5tt7o794KGgsxKyiiG2zVlD0sFZNcQXZZ6xsuEhw4GTv0v5maVTzgJDEiZ1tZAJGcjwCczUJiZAhscGer3dHhGp9NHg
+mpIx2W8yB2eKANfdvFb5x1YTMluSbA848sxZ2eKdboU/g7s5sHFukMq5cujsH6u7GvjQXV3P/FwUoDaKxUh+rHCNrMRTfb7Z/9nAeeEqjrSe1oxo6MekhFOoK9WeEKu2uNaIbwP/7SXLm22pfW3YJUID38UAwEcQyych1/uLpSEHcldPvwAZ
+sycUIcxEhoooF/X0IKBFj4q69XVZDJCoQkTdTaMjf03D/V/7uoYBaQfFXl1i+gm1DZF2e3aKVDotEXBykLmlIUo7dD52usWfcStXRoLDJhOi2FDNb+/agr1ftcYXcdrOs/RGRqO7RNN1ZNapbLkxjFrUUdBT9pjcQC9F4+y2yif404juTNtI
+JalvoWzkLEbnYXuby0D+kMSbZLmnatpWJXIFzUekJmbxUHwu5EpIf5NQY9omuKkr0KfmrnhiO58qLzNpYAQs/tbUnUjrwqOtSGSJgNnLb2y88T/1uGijOeDEnX1awKIxQBK4unRWSmKPQpLbRgSdAer+VBUlKxWWS1A3KqaukKxE3PqUHlI7
+FjfrAQOcMQan+FGdqRQ1Z8dP8hmsKg3YAjD4QRGPIeWOuhFeiipyAUE/mf7ktKngHqpFDV1jPNdUuzND1Udqpri0fRl++TtnJyP0LdSDVbtJbiRc2Foq/nb/RK592L2dSsZqKKQHY8yc84AatGjDnwl6n6FuqrJQQaFBAhlLzbliNj0DZYEm
+if9r1fsvKxNzya7PzmBtxprRywsJo2UYuSYZFSW8SUJ0qU34gWv7vTBI81AfuqE1xPkal2MW5fRrgVI2uenuNvZCmCFTB2WWWtEI/rQCrFQwLfaUcO7RZprPtXe7VZJouLWdlIKtmkv/cWdU2hkEc/CB+iFpuapCvvS8N7Vxz8v5i8K78BB5
+2tEm4QM8p6Eq4nCKnnjT6amU4Kn4GzjJMXtL2SNsCCj/rlvVt8rVy8h+aQqQNvNofrWKFwe/i/UQIRsa/scs/LImmJ7rXTGiswo81ZwB9Ufn7KtLwKHc5nf+nKopcm6CNxL38e0nhdMNh04GcRtT8cCycaig8sqg3W9k+cJiouHSjVwVuFpn
+vSCFeZBtU0hjxHtA4maPZVbCetjkhjCqkvyf4HAHx5I5p+7B0XZJpIEr0Ki2r6MTdOUVrZgbrAnZi2G7BjZ2NSn1dbIkKEtDy7L19hN/NoiYBvn/2ZEJL5iCP40S1dBzbi3nNXPdK/h2n6mtzyM8LavbTvKskixOr/ZuPMTkPgassJzVIdV0
+YtzGO/BPLfe/NOkUjfEffpl5PWGLtmWOj9lh4ujmSDOVVofi9IEIpUE2wFH6d3AL2g0UfZ8GzdmxDuNQCROgeXkg90+X5SU2oZ33YXxco33H0QJpdTbF/x3NxeB7ZYzRJ5ZJtEQYLx38G2jOTfkZEqLeEMjph3y11UZQY5JV8RXxJLmnHylK
+rDa/29e9vgyWvQfAi0km9b2QybWP65y4M5JufcjEczO1RHNJWN2bhM3K2t3X/BICbpHrtNzB20565NFsTalDwqM8U2yxsqXY2jtj9/GxCm5LGUhsGU9ubFOLx0QwnK2rxBMP0DCap4PkEZcN/h6+OFGV0xHDnJyGRncSbi2pSplDn/hDkyFD
+wr5oLXlsY0WRKtyJJ8h/eMXlhWZGdyaWO01nDv0LWWDkvoYuac/GtlxLrPA/NuHyQmsw2eXgIwmenMVBnadEYNQgXK5PddRw/lSvtWXCyHLapyr7RW4MGcmyWYIYMi3pfGCN14dg0KExZQgC5yrd5exrGo+q1w9hOrOgWURx2680ApSv0qZR
+HnIP1oA+TqQDzuDrhfnDe4emgFY6QGI8iPa+3tFfeQF6U+eyp0lhQTMRvihdqtSY1sW9jX4r42d0wr1oUORIeLaOMPE755/kIT9lTYdUzomnUmTkxZ2bqc0D89Yr+Ochx73DDcugE6Tw9QKstzHNvo6cjjlo/dnucijcdawY1N8ulnYbkCbv
+dt0CtStzGNspGTksPd49+9qDEcwrekNdVf0vYlHeifSQh8oueX4uQW9RwnrvROTBiy5oCTCpthsnYNsuxhH1fHnVDVcW7jf0v6iRirF1X/CtKYZZaqE6j+4Pn4qf2TNT9Bs0eTTCaDalPns8XelghRar/8GWx+CdrrziEj2gV0GNK8ngr0oV
+oizkBRO1qXi6B6pKhNlJL1fA475jr5h4Tz7ZCpQ/jzO8+f4XYYSw3WiDoyv/rxva2vCv7XMZSAElYyKAMtjzE+vEJZx/ejrQaf1CaGWCxuOSYkEF2lOK23RrrFXsYmEvts+AdsQbifb1gvYZgQZZAdQmcfQRy7jE5t5waCFnSz5aPnqg8iBB
+Ac6G2xlPxMcgKCXb1Dh4TfBkTBzdFvywJxV4M1U/R58eYXofgBrwHhK8GfTFwQ0/RwFYkolBuXm8wFT88A0EvWTYbt8AKUbdsp26piALcKY7V7p3xBvP6VImgbMJheZMv/DuE642URPcTyaDK139pXMXBZVaCUVokNULHLrBIDmTpyCna+Yd
+3jAs2ZJsHbWCMgNDWmZfDf2LfB+KL1b95GyfgchgSvKibhaUF689Tk5ePt6s3834L3QN4W73eLfpKcDErwtV28KwnY13ojojsDFVwAyEKlYue880uUMZbir/4Zoegz9//PZm7qnrx6CxvI8BcFNJr/2cf8rxjGZzx825F7Vw2hgt0bpC+WWF
+h9Z2182Bg49Z8fo+JIAqM+8fOD1Q/ccDGVCTuvnyVkrL9JaQqQI5yYm/349S/SCwCaGgI6KU1jfh8+OkksINDneHI1Y5ntmiAl5VTiRrWibjqHS9S2OFYbfB7j7abF+XDE5k8I+8VT2sYLUbb4S8nyPGBjdFewy0lrJYxkQ+b3r+u4SUJpMK
+nagV0XMqYjRX4UyVN6tfjSMGcEuWfxvr15iI6E3dDnH/FqjSmfFBo294xXrKvhPBQkPbDk8qaeEHLJekJAFRkr3gi5JlwDzRIB2jJcar0CK4ZZsSIKAEPI8A47EqKUnE3VYx0rTVv2uY8ooQO5LqlC7pbSnSUPK6wei/bPbGYDCYPN1W/6VM
+tqWinyB297LtIVT0ntS6xiVugvwcYE6Z623wzcN+DO79UHSQcCZ3eoHtztWkHyGjp5eSCuxweD7lwImYbWs26PwY88ln4RDjE+mRT5c0Rwz5EN41zlxxBTVpFPAaqv+C2aBPIdDqa4XOPnpWOm5IVJclDmVpgIkpq+s5PvNzzIMHJZeWiuIr
+NAMCdLb5rJiy2OcQ4DoOifFivGLt41COY7pgVIWLWVUeSX+h736QXzBenJNtwvThm7ZSCCPgtn3N4lpGK8bi/40dKtTDvr1zYV2g1t/m0uoZR8Wwa+Vb3F1pYYtk0Ce9HfiN/ytXFQB0wq3dxgfNd7PjQB4XUqhclyxH9pP+E16vVmDq4D6S
+u68bMvpF9E/UjX7kE3XjM430aEyzxaRDPGN7hkvUe5A6GWUjf+u7xp/x5d5ogBLw1TjW0LlrLWhqyLCtRAT7TpsFZdqAistfECgOv1lEBASk9H7VMTKusNHV1hz6ILWC7xN1RbN+6ONsLGexEXBvzSDCviHf/IO2WPknUUqXVqCDt8axm2Ul
+CAR2Qaoc02p3T/t1bSPQJRuiJ9VVL1F3lqrSOt3eLl2o64n4b53FTs7HjRbds+2ArhJtZ8sY9NXxk+t/lG/FfTAPJMl6ia3VBoatmpyfNgc3gmWTGv47q6kaPASCRbxn9z5Jbz9XHOjtes5JFs/4U1HAdIKwSuiLno2ZR5XlHQoLymoTTC48
+IX0SEY45/jVkZVRHCGtVdzIDSA9RLH6ju3us/QV0nOKBBE7vKx9YHRS88vJofiS/mL65xfV4OWKSvbPenrYO+j5d9tzrFwSmZ6y+4HdZEBGWznt5MU2OLuXWMff985PmVuLaE1oYi41sIyk+qpsZd32AbNRMMTWOsRSBHIznqnbETDdUgUm9
+NaNAegdmiESU83tnKNUK3FrMuBnF3B5G+f/RRoRp9//jAi9JVgAud98XFsDb9Cr5rkuhcYXlT26lB56YIbfxupLoPBpuulho6SK2hJ0J9Fvie1R1Mn2FF8EPko9tys09v7N2DiVZ0X9YD8suF82xTKKWjuG0yXdvIvfICuz+dJOao60X0fvO
+IueG6RMCJJ/eg1yfFXnrcMpgtSRgZ1aQQm5YLpSj+lzU0UTg460qEGeiHqksKTDaSyFO7YboGsCCfoXDkmxHaWsGouEQcs8FmOTVQUKDE0QzYO04wBdjTjpimRBL8KlQ80IeLFb8g4M2FEvVsmI8nGjXm8xeke0FqxbXAbR2s2S+S98s3YGe
+bz3VAUZbiHln/9SV8r9WgR/yNdtsJXxYsMI0E38aYisc7688rhpYpMEhz12RD4uZBEFcJ/iJk0d4CCGjXvkp7Srd6+gFNAoqsdnG2A0H5CS3vS0rAwVPFe6vzFesZxaBJIU9TcHoDFpY1AgG3hWfANp1AZMFdUhmdA5ABWb9njiYEJ2PxeTc
+wYAO1RcxBZ5gYVy4Gr+DMoSeZrOaX/8PL0Cs/sEgoQ0rWvdggHuMT6t8BrgBW+W8hgQz7QKOqRpWv01ahFEEhoKE9TWUqIs2eMuXMvwizkTvGOVBZmiyFN9YzzlrheibNfLi2n1jTfNlDu6OuWkWXJd/LGrjHAVgMbBvjXlaDlA1RPEPEg2X
+3Qhqp6XmdyWO8StbWbm1dndWJ4B4qvpIxDQPiptvm2jW06SUvq5dNhik6/YQISIjbb2bup/Evz+mOX7nTpv6KKo0rNR1taWk4IHTYcfWmXwE9lABW3gBg0ZCfBYn1fu2HIWWrIrjutcHDG20z9O8sr4M684/35SW9toIzQYvKcDunMA3aKwZ
+bjtn0lMsWvO+FIL4g5IeyMw9388rreVGnD9CwuWFAEN3hSFIn4hh5qh8tUlxZivfXlPI0nAv/1B0F++lxZjyLY/PqLmoT/UOupAvUXyafrvTUNXdmHErm1ubisr0MPieuMaEbv1mKCRj88vaZ8cSucECj4K9ksI9fUmhetMtwrZX/s2jxtuA
+LKtd0lNdCFrjkH1vjJegKi6YIkoPMfUDHojQibCwwvZp8fjDmuuyS5MNmzamBlWhuW/drgsWNJtljAYqCPKcKG1OVICabsMzeAcAxJs+tv3W9U5xtvuHIls9bKPEmWVm+8OLP1TSrdWhzGFQJdcvT6NZr/wzcbyCyS2NKRJVp05MzfYbjnAR
+r6vUeB+NO7dP4UVvR+DMNhyptgT1eJfxja7JURd1jB7fUi03qdxuTctABL9ZdLTEn+2dIYaH9TaTjy61dPfWUVPPSUbbLn4IRRODnNVLhznYGrJ4/zNELGV4VSKJin3ZZa46aBpZY+Ya6HMROSxBHaP9GKao1N3tSAnVw38n8/GkMR6hP0qQ
+C1H07s0g6P69VJcn3+8BbFPOujEmSOl16U7zrL4SljYqMiH0Nyn67tY83ltMPY8KlWsGe0OdcW27+pdNk2/E6FTGj7X4tA714tCUMpjeh7Y/NHXRqg2e7kYigjb2EjDxnabVbyPdNno9AggHXTiUk15xkcTzLhBtrN6Gm0BPRtCEqGDox8Je
+tviAeqc7dX2TvCv9GQPPRtxJZzlTD5qpnFVxe6zooLdcSRPBcEyiTV95xO4auWsVqOBPiiWq7RO7lVGWiH3eOzCjjHAAYze7TFO2CYHA9qt+ypo67bkRz38VbaImkjhUSpCTKdppUH27Nnty9JkiyxoSiw9Xmkk5bfPqmksrt2Y723FLJAWJ
+v17U7TjOpS9vjPpctgLVA5rKChxaGQFi0yPGxTopkOeFleSxT3qeZ6JN0F/xaSuO7MVKV444JM51EDHA+oVgRN1Iq/oMY0UNHmzBq/MeuFTRuKNNLiqD6+0iM7DiBbH+H8IAmy5GKLj0WuXYW2SWw+TPrBkhN7DxpGSR2Z3C4DFMCXjliEZJ
+CxjgfTmd/o9QOpuRq5Phe0fAfq/VO0sWBHoZecveL8puDzjwfeqVaUYWOZgImZNKA5a9Ux9PKHcX3BbKQV5zbAlMV7Js/W5R8f6LaCH9kz7m/Vv7S07TnfeDCwgfMMKhDUvOJ5moID+AmBrXw1MAWlDPutCP9K7jXmf30fJUA2z/nDmAmYWD
+zNWC+pz1eGaIQY6EGGknLomQZCrSrz4QHyxVSaFxsUWyCZmyb3MnSPyGCOz1eyboiTTz1Glbkf0oKoXSUu5Yy3OcFeHDjEiKtxbVTy6YhD14xZrA9HXsw2cCVPS1UcT4RKuzj+wC/VUgYTGnWH9viXpjhB0HMnmwMJv5WpY3uamoBRoU6hf2
+Fe5kZsmVj+qRShJvdiKF3YNyXShHqzz1fEpe8mxUcvQMZ/6cBIOUmfrVGNps534M0syfkCP5rrmsMwVaSFcodwgf7DpChhdR/F4Wkxan7/MOaCesjwB2fvPTQ/UOTBJxV2iVlROSTbPornDRXwNS533ay/egkWGKr5Pqt3UYgsjSZRufNi2C
+hs8qx63OJdKlQz+UIYu6PNUBUt7pHPUaLqZj1HxyRBOPvFGUh2S2tSafbHAYn/WYkhygt5V0XexzXSJ+QiQ0peG10xAJYA/6Ek+QkBR92gCeU2oZ4s1gYVwXTUGkkFHzIgPgGmE7eOBJpE27SzDHZkPFXNaYQ9sIQWukwa54imIAhQpVSvE5
+PEBTPgWlWKqL85LI7eZH1EEk5P2pfKCJKjc0WznJRl4fAc60QJMYM7HGKhYcsRIS+64hxBJdKUf52xnktGEesPr/xVi7PjovT5yWek1fsM+14J5ML63V3ZB9b7HkRawjuqmDZ11VU7IO2XZrUaOmnjEuXCqItAibv9MZ76Jx18B11V8h7y9B
+LAupYVLQ78IVQykqjq9dxAWI2H1vJ1VfDevErCzzPzSWVi+804o92kOuhZgf34Gms+pb9E1CjDFxC4ga03rmCDgPk0SmaAreR+wFB2Sj9X0bwbzjV1C88nzI44oU3tMoq/icPpdMps5vBNOsj8i+e/4Ei3ZSUqS6RcexoxSZERRjrqTeX0O2
+JrCs0JnRmM5N1pCZFXcxnZUpjK/bCjF43qgNoCYTiGdQP4E3WaAW09gFTcQ6jpRMVNR5EXl/6XCL5KCQyTvGbNOJIVV2DrGaog+VQ2i0wK4wQFesgBhgYzv1FWAcQxkGrV1pRT3jrvWGLQ3SdM1JN9h8j23qq5dXiQmcPJPlA79NcvJ6IpU9
+rMvgcIL0YuUbAPPnWdOF8V7PQRSfbzlt2am7k+Y81G3TqWvRUla8WoU1Ql285xyXrXSlK2i/qxeyVLOR1GOqt9sPmJdrSam0pxD0YaFSiQtnfBHH/Svor24uRY/3POT9tcr8vskQzbKYXAnCXRt6ew7/ttoiNWttjDYmfxeouMUtfoQAcD3A
+XQN4dw7oNwChAsKkbNlleaMXjgixe6OP7ZXM5wF/QPz0bmim/s+JlHzVoybhob80LrZ+tcViAsAusCqtlf8jCL4w6xU6q3AjEgk8mrXb6oOVvJXnNOJtfLvTPgsP9iR2hmDO3Eu/4LHA0ILX6C4Eo7B6yxv/UF2D7X1/bCqwRgJq0mXui71s
+Oa1FxxlKITzEpraZos8OtHGt6yujE0BLWJHa83QI2V31x0TpFjVO+iP5eIonPtVLr+cy/hrvYU9iFiclAvgGpfTLv7DYUyuTqlSpjY00rb/QN2r8RMGQI0aDi09CnhsURiBbig6X2ZNl497YMB/rZo/uXBFJRFRk5CUCQT4A+UukQsr9fx9v
+8euKrrdUKHemv0GC6w9M7gkalEuAB9H8vcBPNOkBK2BXQ8NT2NgSbUZD2DDlMiQtVU1j8zt/Wy8+amwdKDVT/CdsrS7VrKOfFtQfjWxehtLPdrzTKmNxsvpOtjIjpg1q0ja+N43IMc3DH0Dw0SMKPi1btG/VO3bp5gUoMo6EI97qbUrU2Eth
+cKkGneR9GigsxKPjyO+RPgdchGGhbs+7ZMkvvEGAUb7zm9XBc4IqsSICJp8kOhIXQmxIpQ6y+mWPqOXhek26jVtaX+nlql3u64wA2IgbA0g/8JVb98xKRr07ycOTg6HZxJdesjY1Hta6XvThqQ3Y9+KZdJeldKe0lpKGrh2aaboXxtqXNs3v
+z2JbwVk5BaPFG7YEsq7YrpuyqYfmjPgWJXcGg0/YAgP+4nK489lU2LYUl9X2xgkHPHUJ6h4qmOMO3fserJh1i+ZxrFiPkwhX7qnm0fDxaqliessVDy8VDGH5Nhp/bu5l4zdYm8+LTKgR5GqeJ1kGYXKIH9b6C1R5Fa0C1HqARmXLlNGiH6En
+dHIJSpVxH4JbOLCvOwngDnav8nQCn6bdb5xb2Pa1sQDd4IsJa7TnhZDlSJRwaJcdniuuZ/QQ+xWceto0hOq7pChpqSRrA4zgWswrgOosD6TrFD43i2vAFB2L9LYcKHQnmJucIAbL+DR1HjHzKiJiwhvI6w/qzb3sSk1/DWF4iNviQJ/ZOZEJ
+a0rCLaiUvuWCQPYvSlHmiAo/GvDPwWbf1dWIugm7xEUwzKGrdX9ajYt2wb9VoAiJrxl1yY+/Iv0LXa2/C0NeO4DS19VP5PFKgP6iXeZsuH3T2ZRgwon5aUbOmwq7kddh3r6BNa19jBBl0LfsABabOtA8xH9AUOiuX0qj9wHTl2EmqA5zNcpp
++TQOalc+k1zJRN7eZq1rB0Vu3jQsOZHtGq0xtgWDzPDpzqx2NR0Tcf1+BARtsk9/fJEBPwHi5tWkW7FMDcno0PvuEovRI/9Oo7ZeDZ2WcsgDmmxY+zsKrVh+Z88RkjeNpVM08aSXS8Hd/zg9bsDnprr9OSaDmYyWDWqbVh+JjKAJX9coTVnm
+AV6GxbYLlrWi04qysTc+7FatMVvkrTNyFlOrmnPuenF8U13FH0CmHrnCZcV8ebAbeQ0PkbStRo9+VNxTBmJ6q7hNoqFI3F+zi1FAtrBwbnTIhD07toenNV0FdcaIZrfr9T+t3Y3XqipU/bao112cWQ5CltWgDCDIB0WsirwDStRAhJI3AKed
+vRQnYtZ1z3DzDyM520HqjFFRzHhT5ZiukkgopHd/vJDRjzqLJcM6Bz0Bti3lNRXEBl0QX7ttr2jCRfgth4GAKm7su0ZwNBne23fFZ7Oxedmd+5HjpP41/3xNttHq64pFFQZ+zoNdPmY1W9LvcNske7KJ5flYFRlCELlsQIcoIqBUz1BE+Y5a
+ANLTwGoO1QgqQY6RIDZoP0/gJqZJhaYzX25QfABQXiYVRoebVkazBBfsfLi53nVXONXLKqS/PnLAihXa/ed2aIR5YuORSebkECvkyHQzSOXJdAo8vwqWcizuixnk7/6GxEJ7ZgIg+5Lf+D7kNWIZgj6O3wUuHOFrQktkVTEjeCk4I/ipdFRG
+gG1/gWVw7CJM9Bn1aUv31LIbaplAMCuQvNWEH/5I2wfXFYzIgtOgLpWS8UElo3e4F9VG2+R3pzkmsWqvGsI4mdkXpLwI/QJGKx1eVW17d9Hq5LaoeZ/S1a19IxiaAdoaPc2nCRwTS63iIFLbJobpl5QuUWQDikUXNAbNPbcunPmV+CuOfrAV
+ZhxmRyaQS47dP9dGVrImCfnJDQHQzXZZ3azR4hkRvMnfKdD8eG4/c4d4etAH3XEBzHDiIVFSZuiEazBxTVBhGNeIpf076G3pcZwtkBxvBmm/5lBpW3hKi6Wj1QSTTGmgAWe3y0bon1OTFtm2K9EOWNlRW/LU0bTx2ceKCS/HFpKT3QH4i04n
+6VxxjkZ/THvjgsjodX4IOLD8t3dF5BjXsjf22f9Jvf95UOqX37QTDCCdn26vuJs+FXmROPamXpt7SCd0jm3Hj4Lpf5oNbb6VdOa8Nxd6g5Jg2GREO3VhbZQPRF3hTKFund3W2bPgksiTgLcwI07AYFetTVFXSND52HqVP9B2GON+pEkTHGrs
+FguBAK4EteA58Pdi7GD/UufxlNcke+oShCTA6kHTJlpF3Oqq8vc9Y0J8NXRFrilb43jqaPdvqVWtQz/yJveNbC9+OZUfMib2YGYizr+x+snKgbDb0pdqZU0Spl4Gckk7xtjl4VIAWngYVnPgFp+q93lZ1RrqHvyirirtmHkIUU77gHNf6CnF
+MX4QnYv89Z93DoIgN3rMoWCXbB5CS/LSLyChu1OfTPVELtGg74D7bTCYRTm+D4ysrL8qOQvQ8zzOIrROIeYKAb/TupWgMpcT26LD9I+N2n0kZ1jFMI1ZY3NI6BP5Tmm41jIN1jNjuOe7dpdcyUHPDqpaPM0MIBZOrOX4r/FlH7pjDlVb0EIL
+Ol2HCZfCmlCk/6V5bU4dByzliInVQXQfFomZ4nzMZ5YX3KsMBoH1mBpVSQ4rrbjNZE0hg6VmdB2tn7k47B7UaBP068m9S7CUfYCXm4P/TygX1W/B9ApCu8Fk718I/i51cd0slkMwezbH+NzLZcTYOjRDPZnkkezZaTo80vJUE0p9EsXqAVLc
+tZOrWQzGeMx+ELP0h7OJ7yTMyLiAU1Q5iuCW33UyiJognKCfgXWpfdpNkkriMTOK9ztNQUBj1cKcJ3IEuE8qTiKG6tChDq6BA4ZxV/8FZ6GOvGQSt4soAU45oKTk2q5CQV0Z/7/4mZKy74ohCBI+2wMMTfRFrhYw5GG0zg4vuKasgHAse7LV
+7+uO3ch0rS5lp4rYwifx8EfNh3PPL8VBc9P/pY0K9Hs7rQpQL1uqa8a0BBzXe9avMd35cP+hja6DeWvVe4qDYPT+MHzLPKbNwXBRyMLCgUVOiF4QDAE4hA32jt9KS+yRo1qU97CzByTGxfKIFYEPWkAaP6LXX/UhP3v376/5t7mOVW3Ji6QB
++DODinrxoLGX14s3H92l4OQ8WV27BTVTNIAsvnkZsFJ2EjRUvASYkBs/HXDuEzV0ZExt4nFZ8Ouk0i5V/nkk+cPYMnYutdxTNntAXzAO2ajyrzwJqAWNW0vj15AZcndD/x5V120kp3/gCjHyG//mraYISvEbIfOCkX2SJYhCdUEapxdjRSq8
+A0lS7MxukCi7SyhEoeIsbjQ5tjqyYVs02002wYSX9+Ekdh6GuBTEr6izg/MARwkSek9z2S8tcdWc1nUUVxZty9ce0ToA/dhcaPpS7QIXnFAxrEUX8XTmQRWHZdSwtklrJYT3FCEWdkmbx1HolIpdPcrPu+gQhow6j44w/9z0JEinOh4Slchf
+ncP84pUs02GdNjBwk83xHrj6L+Z+LcYcjSTSHFOrlfHe7Ir7IGXUuQqVkSJa5E/ZE5C0UHWqs7Hsd44eYO8Ba68BZUj8PrkVOa7tMM9rQksW2kGoglMtG7KToVbRGAfGzDWl5Y+KOkZhsqKswVdPNtJoNF7/iREA/M6X9y4SyOByVsGPgRjD
+kwnYrONPiHGHtwhmSJd54sguAl2ZIu9O+wZFcxgdCjA26/Gaj/vgeE/JyloX9z6bVe2a2IvjC5eeRsDoKyQldTW8AQV4h4QqZmdXtWCFbHzeNOFiqS51XKJnFhf9tl/wl3BZ35cJ/ax3FN46HhgrriTZFhuM97ZuYUDJ/yC86PlVAg7aneEG
+XxiIzjt5JzEv+AZtRC1iyDrosyShbIqtrI5LPubE3nl8cSjAD6V25AfwEL85J0BM8e0pD3ankOA4RVkGGmXO+UHvWlMEKnSP5lnY+trE9gzzXTBkAACqW4JWwFd9AB2KWiPQvUWHp1jrGIPmuiRn9YgMqvb69OIOBcwOOZtJnDR0Nxp+jcSk
+IuBctCg8WPmG+Ky4zRs38YY0FItlEbbEH6+FHdLnY6JHEgG+R3/K0B434tET+XqfEfnT6UsbunoHPrLODTD29C6zGD+sYctwEFsHzxE8ZMurWlpADqOMqv0WftFAylHUHkkkt97W8s8GyDgf1VndldfRAckJq5ysfdlcBCtr6q2dfPww6Egq
+VVTUyAAMpemlY3Rqzgvr7CjYSwTPUMr+HmwhgnIuqHT2HkkZngY6AwWkkRn/gg/XJse4slgZpgr3SzMaPmKlVVQdue3CW952eojloeqakmdrJmjQDhCGf28KNc0z8OxxV5kBoFCAaBzpo4p9sQaHRjYqXIbp9VWQElc1/taAZg1HnqUylLmU
+vKX4RrCsNRVAa9xcN0pY3pYyAkPwynp4cFl5Zf7sOPhSBG0VPCoypMOqCjnq3FN9wNcxxzQrldS/YHo8jtIGzWarHJtv9012G7TfxTbGdprKE0k+w4CA90YkxV1hktkPe7yqt7h7ZHEKeI6ML5gGx/dJfuft0KoJgSNFNLFM1HyW87+76x/O
+whfUJCHKG09Uxmx3XuxykDvnjCX8sk2ez2arUKNP+E3J7zoOh4CVejsyEoCIJiNrEGpP8y9kqQq73DFhJxyef5HHVVd0isbigWva7c4UyEPlnd6lo/ny9/oz6xS4qJOfuAQxSPjCMHZaldHNWtIiriXqMFv4H1Sz4X3FZetf/prrzKFh7upe
+c5VR5zESyo8knw28/gn5eCVuhQhbDGKuCVItNOpfg+FX3U+Zvydfl27spd7j+T96eZXi7429OKZ7buruKop/cKQeqhvjssbiRWraBbw2w3Bzl447wAS9ierQWFAq4HfJtLR+sFO7RTgnYc5B5bPw9XNcu6PoWCh60YSL4NGvNIqgJAZsiOnK
+zZF9FsX4ZEs9ahWVY3G1xI6ZKDc/hFsRiCiF4fiCzKqGE5cA27U3+IziS38uQtIXsb0dAHmvL2mwT855f11qJrk358GG9HWv5jTK9nHIIO746qvLdJLkYQBR/8MdK1oJBrKwX0mkr9jMF0HKRm/OEeuTPEX7zNTb6Z06GtO3KZvvJ73Wbzvk
+4BdmYSS1OVRJNZRcx4ZI6g4N5tPK6oVyR6K4axC9h3h/1pKN/MAsTvf+L2Of1cAisdkuEXj7JwIiCrei6DylCllKI/NmJ28wipmtADDY8l1ydW6PuYcMeQe+l145/ARAqjVRCgHyi/JjdtoURB9wlIbCwf7uvfeP2srxU2N/+/W9jYitwjtS
+cA8O8Buxr49SEPROtIiCuwv3Ct6AX9ozLIWHhh1aX9MYwII0x7UjdAd874hRIHxHYA2YJ5+PADR282XvR7On0I3tGU/uQIT+1lSUd35EHBSsc/9wHko+7gZZzCGINDVCJkvTG/ykMur5mKoKUK4Vrck0S9qjswdwrF3d1gfE+r3Wis/AzWqb
+tsbgXPeHqqh0wKopZq1vh/KCsiQFu2CBTOrar1BCEhq4GfSBggiGhTxIh11VRF9aOzz3FVUF/I7PhtuHETMycAwPTd1RSyo0l4fktvtDmsgpK7ka2QkGSp9IkapNVpzLfey2+/KrYfBvq1gXDAkytDGlytfaYkag3Ud9fujAaglG8XkRFbnv
+15VjiqxutqnwgPOINOzMCIvoTsi9+DTYs3E5cSGOcdb3Ej33g0kzbHgRddiGaAfbaSAN8GoEICl3wrVg5W5GexIeK/yiz6snd+0B4+Pp1B+pbWmKS2Pk/pd3mxNpOLWbaKTA9eA5Hjracjvb9IcNv7E+D0VPrRlc7AinO4gVMcZ/XVcvlNXY
+kCQF5BsR6dpo2asQ+C5eif/MinHk0FKncX2UBKATBtYQIGG+PzFdLUyxOeCSEpqBgdCY3ZfdGIPBf33LHc15rJpyd4Kg/+JXrLSTWkuTviDRwSt2HjRMJ9pRR1Mv03tzicTOfN/wt3XF//q8nXACWcBuEKWwIh2dXxTLwOX4jwRoTMZe6oKE
+OcgIlvCyUFvzPYYGnTSGLVbPDMeOK5J+jZr01fFe47BmQTpM6g0GBj3BrAHsu1/NFE2hX2KpMLuk7Rthensdi4oAVUxEYX5R8POzDPYxNm7ma/54A+GLNF7K8DKBVOjis58r5FUQct6e1T27cqyc8Wx7GDr24WzewFLhFQiu8rA7wDyPO5sG
+VKpygb9ftlmMb/e9Xx/cYxgpn3LOIPQu9eJklGz8eKipJTW7oyRG5pzpEeHCjR8iNwq/9ZvtDEdQOj8B5U1g8WAcPhj0HSf6bPRvsR56j6mz6YqRoXknCOCsM4b8Qhui/DxApR3VFSa8XeMj2uG998M1o28347bHBRrNuht3cssSFbM17w/j
+LU6yZGZ7ph46a4iagwKbgljHQ9hQNv1+UzleDItzacPGigq9rQxvOUKk3ucQX7kx/uzWTD/s+dGRclG+Qef4lA7lWXGXL4gEFFIu+R6wJPRktMUh1K7G1OF8yKZTZmO4KiMqlSe25BDEkJ7MEOf2btlQGLWRslqA30lD9KHtIXfSbgMgv/BL
+tAh7rXQ+/4kNgPj60lhlWzn+3x3eRtYUhiyip9yH5O/F3w7997P9uoP4F4XZ7UOn+2oF9eFGFnX/ijW0sp0bBMohjhnOY0kR15tR0ULNwRwKeMKTKR1WiFBiT6Pw5T29RZj33xQNPNHMAVlKy1AZll9JX1+IsivdDF/gbpD/OzltGwSqvgV8
+xDPQDIGLN6jg9mkXTMSHV9Lhd+25p/ySmrbxd23ExsQhtHNUKCVGeJBJe40NJ83dhmL0n1ZLIUQ/GS+eOpNozbjBZEC45sPdOLpUO+894RANaBR0DgdRHJliEgLYUYtVpq6as2kBY+ivQ9bfS6hEA4D+wrU2oZxnF2myIfJ9vaJwvFTyjGn6
+DfA4ihU+s7hOVAvjRrFJHFODO/rP9y4MOPPYDrfkjfkHY8FJ8wcyckJeR3dPj7+0H5Q1EHUoYQvLNQ8whHYy/a5Tfhlf1pIEGr+KXHKJJEwe0OaMGyPWsJkFZdVReV6+/kLPgSxAAGXMfFpLm524FfK7bdRMKBDzCaORzBwTjUIHkSPlukQY
+DvgdAommJCCgnimK6D6lyP0EJiBHI5JGUqTRd1KaUFzTjwOigPa5qTNcwSHfBQTGLaKrOmjthh2SibBJm15usgDdgB4NXlCQZN9xAqpWKS70kEngDH+yHaBAjlwFPBHD6WYP7kB53l9YoO39ImJGffmT1SAk6PJ1cB+hTwj5gzay4m7DpxNy
+Wgm/6hyywA52piO9Gvbr19h0WaLa1lFRwQo4FDEAuExnES4MUu77I5Gxlpfmjqdxc2KNS3O2xs/vYGbHNv7bucMNYAai1GSa3VzTSJ47S6d+CKg7E7UGtb9sv78uMLTAb9gxEN9IFvFWc4khOFi4/zg4IdM3Wh/BiF5NAv+tacZzXIZjiKuH
+9JZXsCSeRbEbMncx8G/fH3ZkTAqZZApn7Pd5PR2jsn/i0mlOMBEh82uifAXQdyNHpQ0UF1e+/+hzVQhqfgj1Z2hbQ80vlx4KP4V1/qZNZztzy+ERiB0qEgb24icIEkjpAd1by9MUkXdwR8gzSfJylv3MRdYfxWWsCHlEtw6ysIcedyaSuYeP
+Bza0lc0r+rQHX20CJgr+oOl82FUxSIGBOM2nHJN4QYNlAFx8XERUdruE9Klxqj3OCMLk8b9iYCSQMdQ6Np9Ib0vdOl155X23Pqa9149MuyNw+1xAvjbvGSOcSmEBbtH7qL2sdPXp/1ExBnGeU4fMp46q+gZRjGr4m+8uPxDvTlnvMT5BJk4w
+iWGsg5weDtJ3kzDIMPx/06AE8bFsIdyEQqmGmx2PvhZg+06/6N9+hfjMSEFIpwwy7+UghSoWE3jcu2QnilCP8gkfkRkTgTrS3T8+xfjJkvb3idSzUy5jqS26txQpASnTkDojdVYifapdUBKn+phIWy6InxMa0fTAGcFydKcXDCMf5deVjOal
+Nva0BvVvckn31mbQBuEqhS3kj9krSPq31SoSIAfDx85SVuyYmn35Nfk8deG28xr+OsN7ah8p5X2JLW4wqbDyzA5fNFzDu9WC/OSuFgvE4wUMyLjQG39Prh6KAwUuhfxOWwIOiXzm8emacuRunZukJaD7vPGNaQHydBFl4ggYhOJnaGgd3sqB
+D8ZrGMdxy6h/MFxCDh9ZVMo196dUG7S0MYh6vGSK76/fI+AxnH0Qt5F7A1SWb2NoQhFicK1NXSqs/X1Q5X0kRVRuPw4N9zbldbzOjr5TyqwwOmmrbrVbRsA1q3xWoMbCRS1sdtqDOhtvfdRCwQd0Rag2JZJ/J58Jqsa9Q6kLV3jyKPOxU0SI
+JV43pVFAHAbl8guqiiZ3Bet3aiGd/ikXS05R2vT9yVLT5gVFwQxj3TYmKrBB+uviU9OcK76eeUf3dX1kGoNueWiVczsGat6imMxNA80GjzwPtt6L+sRO7H47alFoOqYFsXVvI0FAT+43+27y/XS3J7fRzaHa9wzbHvynYudOCPJILBtaQ61d
+Y3//2pXxFY7VFCFf7yMmJq2D1KHnMYSy4K/VlW8lZiaxwffATG4jScTcI9LJnt9CB+JB7I1PCvcd5Zypggg1QDUKlaOXkObKlbecTxblgismV95rx8GZyySgf5Ye/oeQcP+HEQ6LnAaoQ4HtRJkK5J3PJHkEM3u/uCeiLwIOzVN85+OMF5C7
+Js8E/aYuxlK0JamRx9gHNpFaDJ57gnFLAp4JD/T9C/oBXOWlDO6cS5eYN7onrLCtWlpVHQkWIorQkinMqJjJnQ+XIQhdgbJQ0U0/9qk4JIwMZMABAhbARUEqRnQMrbgILR5d35t+5FabGZDi/bCE7kdXOkoisPEKlxYBClpeJr0LDS4+WNa3
+q0XLS0JMuWKV7LyL+ttEMb5B1CSzbfMZbwLnzY8NujpYlsQkzLkx4ObQVt51oQT4qV7cpwbO2M8Skb6vZAH3QiEbO9r+it8D5E3PGRfWDkMyDPi2OIgExa2seIOMNjg5p/meURPV5AdqxH5kR7mYhjJK7WYl/P1NKAL5QAKNws72sry5Q1oz
+kHgF8fl9Vzv9vjv/mQysizAwc6Bi41M2WDJP8I6hWyGsAQgLwTjEFYY5BLrQcpK7bDaialkl3ZlO9WD9VyJ/E83cpDeb5Ezfgs7JlqnnKPYvlKdAHP9vBKaDJ7H7ffrJNqo1Pu4+llku0SQGdLzb5tMvOZQd9T1fWwpNKE258zzILujKX1vH
+tPCXkkIUIyvXr7fssRsRi4b8ZX6JDXfzkf+nCVa6MbYkZW3lE2gXt5U34q0bvNJ790sTx8ImFGQkpYKSKfH4ue/zORvBBmtdq18duXq3W9/z3BV+wACztEWeLamByu/9gbXyN0KYBR3/q5lw1BV+2A4/xn8jMFggh20SerRC1Rz54tSc8iFC
+FUukrdNEdco6yleHDKLVrSdkknZRH64LE74VdZQhzcYLvWQ0CUXpBsVM/2TreKRoMjVgphv50DFx3DZFf67iSTjwnqiyCJCQSEpX9aS5qirsj0Wa2MWF34u8/B0JahBMAHM2MZen/JkSrEts6qKrP9a0rswBlGa0Q+aJ0S3sGdGlrBg2V6aW
+jmDyNG7aXwzVLQAjrB4LJAoN792sHFAsRMIVbKOxaMXoIEzQPSIMmEHM5JfqZMX8wOu2gvpyREnAQscOlUGHckX7qoO3Imm2G/a7tAbkUhBPqyl8W+lDjoqCZkvmMYZSYOcGlzdcSNR10m5rhxPeEC65vgcc24vCnrSnSwuhTQwCGu4al4vp
+A/tAzeeB9IesbaSDfMJrPPe6VgIaxLH0wTP7AXlqwR4UYqyf+tUaHAkuqXfH4F9pSEUP3jrsUNrzGnG1QWSd7owFZDK5ziDsjfjxEO9GiBwWNLDLmOM70GWrLnsfqOchGiUITeWsHJ8YfAYE9ffjcdFDds+EvYicrMHPrPTKqKobt7W/mi5y
+FaAJrPsgA7VuOnuZnH4rGwwBNI231gw0dAzBGBb2ZNyXbwYE4lnL13aeYrkmpd1zNKLiYU8miwp+hoCW+BfNilUC2AgngZRHsCWzT8DI73TOTK0adEMb9nmfKxnehchIwCF2RdjcyXxoA1jQ4iY2VMKnQii3AOliFiMltnHhRZRaBAVhuAV8
+yPHkd1IGRtjOvthKMiSFGvj4T9IRu8bz03ymPl+acHhNbV/yH8CL2vlKE3yohtCpAH9Es/1ghV+V0FOR9VcuGt7PDWPZ7ruNFeGAvtalMVV1a29O5B379uc7kh0QoksRXKCnjAuCeVTHzL6OTDOVr64HcxhUdAHRV2i7v72J5V6eDHZITBct
+/osbynMeoMc0ICFl/1uBIrNVcotVhOVowW4xvBrJ4tNfFFajXKpeOYtKeBTkDHgraG32z0g0COSK+RiwVRiJCRrymR9SlktLdPw1QyODDT7Ozj12rjyiB234jUGlNzpxGIhs/6WhWZk3UnIkwTysq+5nWNJeEaP8bGVFaMlBqrGC7DX5WOQW
+Wa9RuJrOJPZMCGahcB5L60YJoWgVnmvS9dzzu/MkUCAsc4baX1XZTy5/+/JycfUKrtg+YfkN07zW7EJ70KTL8gyGKPrFXrsyn7YWFZXzcv88ClJRdLoh5+goGoxllIMcjMJk+vhpjp4lRFxfYzMSX+ifY3MUXZE2MyY3p4z3Vxm64CFk0Dz8
+BJvHycG4F+mIOM0hlooSNoKKT9yviX98IZXp64vQg/19TvhDmHlsgzHlPDeBOR6ifXEjs0z/Q4xF0ZRk7XghYwytciqDqCDf7mQ8OWwFXXlXcNZSTVWLYp7leIPBQpkv4GFx4wgM8ns2v/KJp5jzdVUrKUZQkXz8/95MtlmZ1m1pDjL3URgl
+lYQMj1YZiZOOD+wNzJ8XXu8m57DVtIEfdmf5uO2ib9nL71kzWNyJzJYhEaLP6wukSLyAWgJkxQR2KY4Adi6B/AdphpLo7LIwNI8tH9fGXQW/Vvf7OlBRSFs2lha3InU7PDwJgjqNvoxaTMSvo/KjKl33lg2nBveqNFtT5p3kxco63VT5AXlH
+EwNn2Pm0cJU75C6zUpII5f9awWJNeAAAc5FUHiUgZiAABk8EBlroCYvXUPbHEZ/sCAAAAAARZWg==.
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 2a45e4b775c8c76b0d333e0bca33db17a5343241..ec68e02fd3a0f133e439dd5ca943248da28f2965 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 mosspy
-jinja2
+jinja2      # Used to create the _grade.py script.
 unitgrade
 # setuptools==57 # This is because of pyminifier (mumble, grumble)
 # pyminifier         # No longer needed; bundled.
diff --git a/setup.py b/setup.py
index fea73ea8c94e4f1de994daec49b4c16a22fd21f3..5b6bfea06d627ffa8cf5d1c947fd4da715188116 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ setuptools.setup(
     packages=setuptools.find_packages(where="src"),
     include_package_data=True,
     python_requires=">=3.8",
-    install_requires=['numpy', "codesnipper", 'tabulate', 'tqdm', "pyfiglet",
+    install_requires=['unitgrade', 'numpy', "codesnipper", 'tabulate', 'tqdm', "pyfiglet", 'jinja2',
                       "colorama", "coverage", # 'pyminifier',  cannot use pyminifier because 2to3 issue. bundled. will that work?
                       'mosspy'],
 )
diff --git a/src/unitgrade_private/hidden_create_files.py b/src/unitgrade_private/hidden_create_files.py
index 2924e2239ad103101289b5220c5b70b014f22226..b0d0affb804a5b5531a0d083078aed85b5ab41dd 100644
--- a/src/unitgrade_private/hidden_create_files.py
+++ b/src/unitgrade_private/hidden_create_files.py
@@ -5,10 +5,10 @@ import inspect
 import time
 import os
 from unitgrade_private import hidden_gather_upload
-import sys
+# import sys
 import os
 import glob
-from pupdb.core import PupDB
+# from pupdb.core import PupDB
 
 data = """
 {{head}}
@@ -101,16 +101,16 @@ def setup_grade_file_report(ReportClass, execute=False, obfuscate=False, minify=
                                                                    'coverage_files': cf
                                                                    }
             a = 34
-    s, _ = dict2picklestring(artifacts['questions'])
+    # s, _ = dict2picklestring(artifacts['questions'])
     db['questions'] = artifacts['questions'] # ('questions', s)
     with open(report._artifact_file(), 'wb') as f:
         pickle.dump(db, f)
 
     for f in glob.glob(os.path.dirname(report._artifact_file()) + "/*.json") + glob.glob(os.path.dirname(report._artifact_file()) + "/cache.db*"): # blow old artifact files. should probably also blow the test cache.
-        if os.path.basename(f).startswith("main_config"):
-            continue
-        else:
-            os.remove(f)
+        # if os.path.basename(f).startswith("main_config"):
+        #     continue
+        # else:
+        os.remove(f)
 
     from unitgrade_private.hidden_gather_upload import gather_report_source_include
     sources = gather_report_source_include(report)