diff --git a/README.md b/README.md
index 732ee86cc5f6583f8ea2c71579c5858572b5b4f8..3f88deb4992814288a2964c4cb055eeb6583be0c 100644
--- a/README.md
+++ b/README.md
@@ -31,23 +31,89 @@ Let's look at an example. You need three files
 ```
 instructor/cs101/homework1.py # This contains the student's homework
 instructor/cs101/report1.py   # This contains the tests
-instructor/cs101/deploy.py   # This deploys the tests
+instructor/cs101/deploy.py   # A private file to deploy the tests
 ```
 
 ### The homework
-The homework is just any old python code. 
+The homework is just any old Python code you would give to the students. For instance:
 ```python
-def add(a,b):
-   # Write a function which add two numbers!
-   return a+b # naturally, this part would NOT be destributed to students
+def reverse_list(mylist): #!f
+    """
+    Given a list 'mylist', returns a list consisting of the same elements in reverse order. E.g.
+    reverse_list([1,2,3]) should return [3,2,1] (as a list).
+    """
+    return list(reversed(mylist))
+
+def add(a,b): #!f
+    """ Given two numbers `a` and `b` this function should simply return their sum:
+    > add(a,b) = a+b """
+    return a+b
+
+if __name__ == "__main__":
+    # Problem 1: Write a function which adds two numbers
+    print(f"Your result of 2 + 2 = {add(2,2)}")
+    print(f"Reversing a small list", reverse_list([2,3,5,7]))
 ```
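+The `#!f` tags are used during deployment (see below) to strip the function bodies from the files handed to the students, so they receive the signature and docstring but not the solution.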
 ### The test
-The test consists of individual problems and a report-class. The tests themselves are just regular Unittest. For instance:
+The test consists of individual problems and a report class. The tests themselves are just regular `unittest` test cases (we will see a slightly smarter approach in a moment). For instance:
+```python
+from homework1 import reverse_list, add
+import unittest
+
+class Week1(unittest.TestCase):
+    def test_add(self):
+        self.assertEqual(add(2,2), 4)
+        self.assertEqual(add(-100, 5), -95)
+
+    def test_reverse(self):
+        self.assertEqual(reverse_list([1,2,3]), [3,2,1])
+
+```
+A number of tests can be collected into a `Report`, which allows us to assign points to the tests and to use the more advanced features of the framework later. A complete, minimal example:
+
 ```python
+from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+from homework1 import reverse_list, add
 import unittest
-class MyTest(unittest.TestCase):
-   # Write a function which add two numbers!
-   return a+b # naturally, this part would NOT be destributed to students
+
+class Week1(unittest.TestCase):
+    def test_add(self):
+        self.assertEqual(add(2,2), 4)
+        self.assertEqual(add(-100, 5), -95)
+
+    def test_reverse(self):
+        self.assertEqual(reverse_list([1,2,3]), [3,2,1])
+
+import cs101
+class Report1(Report):
+    title = "CS 101 Report 1"
+    questions = [(Week1, 10)]  # Include a single question for 10 credits.
+    pack_imports = [cs101]
+
+if __name__ == "__main__":
+    # Uncomment to simply run everything as a unittest:
+    # unittest.main(verbosity=2)
+    evaluate_report_student(Report1())
+```
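+
+Running the script (e.g. `python report1.py`) evaluates all questions and prints a table with the provisional number of points. The `-q` option evaluates a single question; see `--help` for the remaining options.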
+
+## Deployment
+The above is all you need if you simply want to use the framework as a self-check: students can run the code and see how well they did.
+In order to use the framework for actual evaluation, we need a bit more structure. We do that by deploying the report class as follows:
+```python
+from report1 import Report1
+from unitgrade_private2.hidden_create_files import setup_grade_file_report
+from snipper import snip_dir
+
+if __name__ == "__main__":
+    setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
+
+    # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
+    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
+
 ```
+ - The first line creates the `report1_grade.py` script and any additional data files needed by the tests (none in this case).
+ - The second line sets up the `students` directory (remember, our copy includes the solutions!) and removes the solutions. You can check the result in the `students` folder.
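+
+Since the generated `.token` file is just a pickled dictionary, you can also inspect a handed-in token directly. Below is a minimal sketch, assuming a student has already run `report1_grade.py` in the deployed folder (the path is illustrative; the docker example elsewhere in this repository uses the same trick):
+
+```python
+import glob
+import pickle
+
+# Find a .token file produced by report1_grade.py (illustrative path).
+token_file = glob.glob("../../students/cs101/*.token")[0]
+with open(token_file, 'rb') as f:
+    results = pickle.load(f)
+# results['total'] is an (obtained, possible) tuple of points.
+print("Student's score was:", results['total'])
+```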
 
 
diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc
index 0edd5337296831e5e9223e6b60b503ad093cf02e..e5e928b3cf8c95f9ee7e0283457b713591a406e4 100644
Binary files a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc and b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc differ
diff --git a/examples/example_docker/instructor/cs103/deploy.py b/examples/example_docker/instructor/cs103/deploy.py
index 807c37e4e9637aec500551c1cb22820a433bd8ec..24299492a33d4fd290394b344abd9fc96ef2d149 100644
--- a/examples/example_docker/instructor/cs103/deploy.py
+++ b/examples/example_docker/instructor/cs103/deploy.py
@@ -14,14 +14,10 @@ def deploy_student_files():
     setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False)
     Report3.reset()
 
-    # from cs103.report3_complete import Report3
-    # gather_upload_to_campusnet(Report3())
-
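+    # Strip the @hide-decorated tests and write the student-facing report3.py.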
     fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py")
     setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False)
     sdir = "../../students/cs103"
-    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True,
-             exclude=['*.token', 'deploy.py', 'report3_complete*.py'])
+    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py'])
     return sdir
 
 def run_student_code_on_docker(Dockerfile, student_token_file):
@@ -36,13 +32,11 @@ def run_student_code_on_docker(Dockerfile, student_token_file):
 if __name__ == "__main__":
     # Step 1: Deploy the student's files and return the directory they were written to
     student_directory = deploy_student_files()
+
     # Step 2: Simulate that the student runs their report script and generates a .token file.
     os.system("cd ../../students && python -m cs103.report3_grade")
     student_token_file = glob.glob(student_directory + "/*.token")[0]
-    # Let's quickly check the students score:
-    with open(student_token_file, 'rb') as f:
-        results = pickle.load(f)
-    print("Student's score was:", results['total'])
+
 
     # Step 3: Compile the Docker image (obviously you will only do this once; add your packages to requirements.txt).
     Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile"
@@ -50,4 +44,9 @@ if __name__ == "__main__":
 
     # Step 4: Test the student's .token file and get the results token file. Compare the contents with student_token_file:
     checked_token = run_student_code_on_docker(Dockerfile, student_token_file)
-    print("My results of the students score was", checked_token['total'])
+
+    # Let's quickly compare the student's score to what we got (the dictionary contains all relevant information, including code).
+    with open(student_token_file, 'rb') as f:
+        results = pickle.load(f)
+    print("Student's score was:", results['total'])
+    print("My independent evaluation of the students score was", checked_token['total'])
diff --git a/examples/example_docker/instructor/cs103/report3_complete_grade.py b/examples/example_docker/instructor/cs103/report3_complete_grade.py
index e7244c084c21c05d374aacc32bfa4f5c4ced2039..b0deb8ee4bdcedb9632c0b37b7186caefcb09001 100644
--- a/examples/example_docker/instructor/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_complete_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f505b000000000075732e'
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6066800000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
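
The tail of the hunk above shows the deployed test class: `test_add_hidden` carries the `@hide` decorator, and the embedded framework implements `hide` through `makeRegisteringDecorator`, which tags every function it decorates with the decorator object that produced it, so that tooling (here, the deployment step that strips hidden tests) can find those methods again via `methodsWithDecorator`. A minimal, self-contained sketch of that registering-decorator pattern; `Week1Demo` and `make_registering_decorator` are illustrative names, not part of the unitgrade API:

```python
def hide(func):
    return func  # the decorator itself is inert at runtime

def make_registering_decorator(dec):
    # Wrap `dec` so every function it decorates remembers which
    # decorator produced it; tooling can then locate those functions.
    def new_dec(func):
        out = dec(func)
        out.decorator = new_dec
        return out
    new_dec.__name__ = dec.__name__
    return new_dec

hide = make_registering_decorator(hide)

class Week1Demo:
    @hide
    def test_add_hidden(self):
        pass

    def test_add(self):
        pass

# Find the methods tagged with @hide (the ones removed from student builds):
hidden = [name for name, f in Week1Demo.__dict__.items()
          if getattr(f, "decorator", None) is hide]
print(hidden)  # ['test_add_hidden']
```

The pattern keeps the decorated method fully functional while leaving a discoverable marker on the function object.
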
diff --git a/examples/example_docker/instructor/cs103/report3_grade.py b/examples/example_docker/instructor/cs103/report3_grade.py
index 8d2156999f74cb59fe8634dc37effee146841ab9..7a98ca2bb1c0ee5be8efa2df70e42f7abdb2a140 100644
--- a/examples/example_docker/instructor/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
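
The only textual change in this hunk is `report1_payload`: a hex-encoded pickle of the instructor-computed cache (expected answers such as `4` and `-95`, plus titles and timings), which `source_instantiate` in the embedded source restores with `pickle.loads(bytes.fromhex(payload))`. The old and new strings appear to differ only in the stored timing floats. A hedged sketch of building and inspecting such a payload with the standard library; the cache contents below are toy data, not the actual Report3 cache:

```python
import pickle
import pickletools

# Build a payload the same way the deploy step builds report1_payload:
# pickle a dict of cached results, then hex-encode it.
cache = {"Week1": {("Week1", "test_add", 0): 4, "time": 0.001}}
payload = pickle.dumps(cache).hex()

# What source_instantiate does with the hex string:
restored = pickle.loads(bytes.fromhex(payload))
print(restored)

# Disassemble the opcode stream without executing the unpickler:
pickletools.dis(bytes.fromhex(payload))
```

Since the payload is an ordinary pickle, `pickletools.dis` gives a safe way to see where two payload strings diverge (e.g. the timing floats above) without running any embedded code.
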
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token
index 09b7d0966cea33dd01451ccee5e5befd524b1dd7..f341fcbe61424bb2cfb1ff275aada4c9ac0b1d87 100644
Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token and b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token differ
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_20_of_20.token b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_20_of_20.token
deleted file mode 100644
index a402e6183235eee92b6de0f54d950df094897ccc..0000000000000000000000000000000000000000
Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_20_of_20.token and /dev/null differ
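
The two binary diffs above update one `.token` hand-in and delete another. As `gather_upload_to_campusnet` in the embedded source shows, a token file is a plain pickle of the `results` dict: the score sits under `'total'`, and each packed package is included under `'sources'` as a zip archive of its `.py` files. A short sketch of reading one back; the file name is taken from this diff, and the keys follow the source above:

```python
import io
import pickle
import zipfile

token_path = "Report3_handin_0_of_20.token"  # file name from the diff above

with open(token_path, "rb") as f:
    results = pickle.load(f)

obtained, possible = results["total"]
print(f"Score: {obtained} of {possible}")

# Each entry under 'sources' was produced by gather_imports and carries a
# zip archive of the packed package's .py files:
for k, src in results.get("sources", {}).items():
    with zipfile.ZipFile(io.BytesIO(src["zipfile"])) as zf:
        print(src["name"], "contains", zf.namelist())
```
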
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc
index 1f10f66e9b20dea24d1969426da24f2459b0e1f8..de977681ea2f743536e28e736d54eac5477fa5a8 100644
Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc and b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc differ
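
The framework embedded in these grade scripts grades printed output by extracting every number from the captured terminal text, using the verbose regex in `extract_numbers`. A self-contained demo of that pattern; the sample string is made up:

```python
import re

# The numeric-constant pattern from extract_numbers in the embedded source:
numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ )?'
rx = re.compile(numeric_const_pattern, re.VERBOSE)

txt = "Your result of 2 + 2 = 4 (err 1.5e-3)"
numbers = [float(a) if ('.' in a or 'e' in a) else int(a)
           for a in rx.findall(txt)]
print(numbers)  # [2, 2, 4, 0.0015]
```
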
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc
index 870a12277bd7eac55d2382ab61dbf6bf9b568faa..324b033b154a5d1db71862ebf7103810d4499aa2 100644
Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc and b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc differ
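
The captured text itself comes from the `Capturing` context manager defined near the top of the embedded source, which swaps `sys.stdout` for a `StringIO` and collects the printed lines on exit. (Note that the original assigns `sys.sterr` where `sys.stderr` is presumably meant, so stderr is not actually redirected; that typo is preserved verbatim in the generated files above.) A trimmed version of the stdout part:

```python
import sys
from io import StringIO

class Capturing(list):
    """Trimmed sketch of the Capturing helper from the embedded source:
    redirect sys.stdout into a StringIO and expose the lines afterwards."""
    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, *args):
        self.extend(self._stringio.getvalue().splitlines())
        sys.stdout = self._stdout

with Capturing() as output:
    print("hello from the test")
print(output)  # ['hello from the test']
```
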
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_grade.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_grade.cpython-38.pyc
deleted file mode 100644
index daea90bc8b208769b148b93df011cd59b9a5460e..0000000000000000000000000000000000000000
Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_grade.cpython-38.pyc and /dev/null differ
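
The final hunk below applies the matching payload change to `report3_complete_grade.py`, which embeds the same framework source. The piece doing the actual checking is `UTestCase.assertEqualC`: rather than comparing against a literal written into the test, it compares against the instructor's cached result, keyed by class name, test method, and call index (`unique_cache_id`). A simplified sketch of that idea; `CachedTestCase` is a hypothetical stand-in, not the real unitgrade class:

```python
import unittest
from collections import defaultdict

class CachedTestCase(unittest.TestCase):
    # Simplified stand-in for unitgrade's UTestCase, for illustration only.
    _cache = {}  # the instructor run fills this; student runs compare against it

    def setUp(self):
        self._calls = defaultdict(int)  # per-test call counter

    def assertEqualC(self, first):
        # Key mirrors unique_cache_id: (class, test method, call index).
        base = (type(self).__qualname__, self._testMethodName)
        key = base + (self._calls[base],)
        self._calls[base] += 1
        if key not in self._cache:
            self._cache[key] = first  # first (instructor) run records the value
        self.assertEqual(first, self._cache[key])
```

In unitgrade the cache is produced on the instructor's machine, pickled, and shipped to students inside `report1_payload`, so a student run only ever reads it.
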
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
index e7244c084c21c05d374aacc32bfa4f5c4ced2039..b0deb8ee4bdcedb9632c0b37b7186caefcb09001 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f505b000000000075732e'
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6066800000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
index efd340396e9e790026b9ef3e40351be4d3fed19d..03baa4ed8a6fcd16ec0b6227d7eefc95f66d0bac 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
@@ -431,7 +431,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DON\'T import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import at the top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd is None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache is None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 is None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("to CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
diff --git a/examples/example_docker/students/cs103/report3_grade.py b/examples/example_docker/students/cs103/report3_grade.py
index efd340396e9e790026b9ef3e40351be4d3fed19d..03baa4ed8a6fcd16ec0b6227d7eefc95f66d0bac 100644
--- a/examples/example_docker/students/cs103/report3_grade.py
+++ b/examples/example_docker/students/cs103/report3_grade.py
@@ -431,7 +431,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because importing at the top makes the __version__ tag fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _precomputed_title = None # Must default to None; _safe_get_title checks it before falling back to title.\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
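For reference, the generated grade file shown above is fully self-contained: the framework and report code travel in the `report1_source` string, and the precomputed answers in the hex-pickled `report1_payload`. The embedded `source_instantiate` helper turns the two strings back into a live report object. A minimal sketch of that mechanism, mirroring the embedded source (which spells `exec` as `eval("exec")` only to survive minification):

```python
import pickle

def source_instantiate(name, report_source, payload_hex):
    # Define the framework and the report class in this module's globals.
    exec(report_source, globals())
    # The payload is a hex-encoded pickle holding the expected answers.
    pl = pickle.loads(bytes.fromhex(payload_hex))
    # Look the report class up by name and hand it the payload.
    return eval(name)(payload=pl, strict=True)
```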
diff --git a/examples/example_framework/instructor/cs102/deploy.py b/examples/example_framework/instructor/cs102/deploy.py
index 4cceb5fb27d0a52056f06e4c31af093aeab66452..c5859086617e82fa7a118f320895376a784aeb30 100644
--- a/examples/example_framework/instructor/cs102/deploy.py
+++ b/examples/example_framework/instructor/cs102/deploy.py
@@ -6,4 +6,4 @@ if __name__ == "__main__":
 
     gather_upload_to_campusnet(Report2())
     setup_grade_file_report(Report2, minify=False, obfuscate=False, execute=False)
-    snip_dir(source_dir="../cs102", dest_dir="../../students/cs102", clean_destination_dir=True, exclude=['*.token', 'deploy.py'])
+    snip_dir(source_dir="../cs102", dest_dir="../../students/cs102", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
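Adding `__pycache__` to the exclusion list keeps compiled bytecode caches out of the student handout. For readers not using snipper, the copy-and-exclude part can be approximated with the standard library; this is a sketch only, since `snip_dir` additionally strips the solution blocks from the sources:

```python
import shutil

# Copy the course directory, skipping caches, token files and the private
# deploy script. Unlike snip_dir, this performs no solution stripping.
shutil.copytree(
    "../cs102", "../../students/cs102",
    ignore=shutil.ignore_patterns("__pycache__", "*.token", "deploy.py"),
    dirs_exist_ok=True,  # requires Python 3.8+
)
```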
diff --git a/examples/example_simplest/instructor/cs101/deploy.py b/examples/example_simplest/instructor/cs101/deploy.py
index 8906532315f44a5077a5d4de3313beeb1707f35c..3e9682d9aa6d9ffce1501d7826a8bd126779d75c 100644
--- a/examples/example_simplest/instructor/cs101/deploy.py
+++ b/examples/example_simplest/instructor/cs101/deploy.py
@@ -2,15 +2,12 @@ from report1 import Report1
 from unitgrade_private2.hidden_create_files import setup_grade_file_report
 from snipper import snip_dir
 import shutil
-# from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet
 
 if __name__ == "__main__":
-    # gather_upload_to_campusnet(Report1())
     setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
 
     # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
-
-    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['*.token', 'deploy.py'])
+    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
 
     # For convenience, copy the homework to the other examples.
     for f in ['../../../example_framework/instructor/cs102/homework1.py', '../../../example_docker/instructor/cs103/homework1.py']:
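The hunk ends at the loop header, so the body falls outside the diff context. Presumably it copies the homework file to each listed destination, along these purely hypothetical lines:

```python
import shutil

# Hypothetical loop body; the real code lies below the hunk shown above.
for f in ['../../../example_framework/instructor/cs102/homework1.py',
          '../../../example_docker/instructor/cs103/homework1.py']:
    shutil.copy("homework1.py", f)  # assumed source path
```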
diff --git a/unitgrade_private2/docker_helpers.py b/unitgrade_private2/docker_helpers.py
index 965ddf50bea779f2d421b3cf747010baf57d07b6..a91e4bad32cefe8335c4ac540bfc5396c1ce8caf 100644
--- a/unitgrade_private2/docker_helpers.py
+++ b/unitgrade_private2/docker_helpers.py
@@ -26,13 +26,15 @@ def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file,
     """
     # Sanity checks: fail early if required inputs are missing.
     assert os.path.exists(Dockerfile_location)
-
     start = time.time()
 
     with open(student_token_file, 'rb') as f:
         results = pickle.load(f)
     sources = results['sources'][0]
 
+    if os.path.exists(host_tmp_dir):
+        shutil.rmtree(host_tmp_dir)
+
     with io.BytesIO(sources['zipfile']) as zb:
         with zipfile.ZipFile(zb) as zip:
             zip.extractall(host_tmp_dir)
@@ -43,7 +45,6 @@ def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file,
     else:
         gscript = instructor_grade_script
 
-    # rel_location =
     student_grade_script = host_tmp_dir + "/" + sources['name'] + "/" + sources['report_relative_location']
     instructor_grade_script = os.path.dirname(student_grade_script) + "/"+os.path.basename(gscript)
     shutil.copy(gscript, instructor_grade_script)
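The added `shutil.rmtree` ensures `host_tmp_dir` is rebuilt from scratch, so sources from a previously graded token cannot leak into the Docker context. For context, a `.token` file is a pickled results dictionary whose `sources` entry carries a zipped snapshot of the student's packages; unpacking one takes only a few lines (file names here are hypothetical):

```python
import io
import os
import pickle
import shutil
import zipfile

host_tmp_dir = "tmp"                     # hypothetical scratch directory
token = "Report1_handin_10_of_10.token"  # hypothetical token file name

with open(token, "rb") as f:
    results = pickle.load(f)
sources = results['sources'][0]

if os.path.exists(host_tmp_dir):         # same start-clean fix as above
    shutil.rmtree(host_tmp_dir)

with io.BytesIO(sources['zipfile']) as zb:
    with zipfile.ZipFile(zb) as zf:
        zf.extractall(host_tmp_dir)
```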