From 6ec27f58e31a18a1bfe57b35ad5c289235a12074 Mon Sep 17 00:00:00 2001
From: Tue Herlau <tuhe@dtu.dk>
Date: Thu, 16 Sep 2021 11:20:01 +0200
Subject: [PATCH] Version bump with changes to the token file structure

---
 .../cs101courseware_example/cs101report1.py   |   4 +-
 .../cs101report1_grade.py                     |   2 +-
 .../cs101courseware_example/cs101report2.py   |   6 +-
 .../cs101courseware_example/instructions.py   |   2 +-
 docs/mkdocs.py                                |   4 +-
 setup.py                                      |   4 +-
 src/unitgrade/__init__.py                     |  70 +-
 src/unitgrade/unitgrade.py                    | 865 ++++++++++++------
 src/unitgrade/unitgrade_helpers.py            | 234 ++---
 src/unitgrade/version.py                      |   2 +-
 src/unitgrade2/__init__.py                    |  44 -
 .../__pycache__/__init__.cpython-38.pyc       | Bin 494 -> 0 bytes
 .../__pycache__/unitgrade2.cpython-38.pyc     | Bin 22788 -> 0 bytes
 .../unitgrade_helpers2.cpython-38.pyc         | Bin 6853 -> 0 bytes
 .../__pycache__/version.cpython-38.pyc        | Bin 171 -> 0 bytes
 src/unitgrade2/unitgrade2.py                  | 686 --------------
 src/unitgrade2/unitgrade_helpers2.py          | 195 ----
 src/unitgrade2/version.py                     |   1 -
 src/unitgrade_v1/__init__.py                  |  36 +
 src/unitgrade_v1/unitgrade.py                 | 414 +++++++++
 .../unitgrade_grade.py                        |   0
 src/unitgrade_v1/unitgrade_helpers.py         | 268 ++++++
 src/unitgrade_v1/version.py                   |   1 +
 23 files changed, 1420 insertions(+), 1418 deletions(-)
 delete mode 100644 src/unitgrade2/__init__.py
 delete mode 100644 src/unitgrade2/__pycache__/__init__.cpython-38.pyc
 delete mode 100644 src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc
 delete mode 100644 src/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc
 delete mode 100644 src/unitgrade2/__pycache__/version.cpython-38.pyc
 delete mode 100644 src/unitgrade2/unitgrade2.py
 delete mode 100644 src/unitgrade2/unitgrade_helpers2.py
 delete mode 100644 src/unitgrade2/version.py
 create mode 100644 src/unitgrade_v1/__init__.py
 create mode 100644 src/unitgrade_v1/unitgrade.py
 rename src/{unitgrade => unitgrade_v1}/unitgrade_grade.py (100%)
 create mode 100644 src/unitgrade_v1/unitgrade_helpers.py
 create mode 100644 src/unitgrade_v1/version.py
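
For context, the first hunks below retarget the legacy CS101 examples from the old `unitgrade` package to the relocated `unitgrade_v1` package; the report API itself is unchanged. A minimal sketch of a v1 report script after this patch, assembled from the class and function names that appear in the diffs (the `__main__` guard is illustrative):

    from unitgrade_v1.unitgrade import QuestionGroup, Report, QPrintItem
    from unitgrade_v1.unitgrade_helpers import evaluate_report_student
    from cs101courseware_example import homework1

    class ListReversalQuestion(QuestionGroup):
        title = "Reversal of list"

        class ListReversalItem(QPrintItem):
            l = [1, 3, 5, 1, 610]
            def compute_answer_print(self):
                # Printed output and the return value are both captured
                # and routed through process_output() for comparison.
                from cs101courseware_example.homework1 import reverse_list
                return reverse_list(self.l)

    class Report1(Report):
        title = "CS 101 Report 1"
        questions = [(ListReversalQuestion, 5)]
        pack_imports = [homework1]  # Source files bundled into the .token file.

    if __name__ == "__main__":
        evaluate_report_student(Report1())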

diff --git a/docs/legacy/cs101courseware_example/cs101report1.py b/docs/legacy/cs101courseware_example/cs101report1.py
index d113630..daaf174 100644
--- a/docs/legacy/cs101courseware_example/cs101report1.py
+++ b/docs/legacy/cs101courseware_example/cs101report1.py
@@ -1,5 +1,5 @@
-from unitgrade.unitgrade import QuestionGroup, Report, QPrintItem
-from unitgrade.unitgrade_helpers import evaluate_report_student
+from unitgrade_v1.unitgrade import QuestionGroup, Report, QPrintItem
+from unitgrade_v1.unitgrade_helpers import evaluate_report_student
 from cs101courseware_example import homework1
 
 class ListReversalQuestion(QuestionGroup):
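
The `*_grade.py` diff that follows carries the obfuscated grading payload; the token-file change named in the commit subject concerns the files this script emits. In the embedded source, `bzwrite` serializes the `results` dict built by `gather_upload_to_campusnet` as bz2-compressed JSON, so a handed-in `.token` file can be inspected as sketched below (the filename is illustrative; the keys match that `results` dict):

    import bz2
    import json

    # .token files are written via bz2.open(token, "wt") + json.dumps(results).
    with bz2.open("Report1_handin_13_of_18.token", "rt") as f:  # illustrative name
        results = json.load(f)

    print(results["total"])            # [obtained, possible] point totals
    print(sorted(results["sources"]))  # relative paths of the packed source files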
diff --git a/docs/legacy/cs101courseware_example/cs101report1_grade.py b/docs/legacy/cs101courseware_example/cs101report1_grade.py
index 15cdc7e..a924a0a 100644
--- a/docs/legacy/cs101courseware_example/cs101report1_grade.py
+++ b/docs/legacy/cs101courseware_example/cs101report1_grade.py
@@ -312,7 +312,7 @@ def iFlPxdyCqBrgMNDzVoEH(iFlPxdyCqBrgMNDzVoEu,iFlPxdyCqBrgMNDzVoEL,payload):
  pl=iFlPxdyCqBrgMNDzVoch(iFlPxdyCqBrgMNDzVojE.fromhex(payload))
  iFlPxdyCqBrgMNDzVofu=iFlPxdyCqBrgMNDzVocW(iFlPxdyCqBrgMNDzVoEu)(payload=pl,strict=iFlPxdyCqBrgMNDzVocG)
  return iFlPxdyCqBrgMNDzVofu
-iFlPxdyCqBrgMNDzVoEL='import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    # _precomputed_title = None\n\n    def __init__(self, working_directory=None, correct_answer_payload=None, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        self._correct_answer_payload = correct_answer_payload\n        self.question = question\n        # self.a = "not set"\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    # def get_title(self):\n    #     Overwrite this to compute a post-computed title.\n    #     return None\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print("Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    
print("\\n=================================================================================")\n                    print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Graph search"\n    items = None\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n\n    def __init__(self, *args, **kwargs):\n\n        self.name = self.__class__.__name__\n        if self.items is None:\n            self.items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for gt in members:\n                self.items.append( (gt, 1) )\n        self.items = [(I(question=self), w) for I, w in self.items]\n        self.has_called_init_ = False\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(inspect.getfile(type(self))))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        import time\n        qs = [] # Has to accumulate to new array otherwise the setup/evaluation steps cannot be run in sequence.\n        for k, (Q, w) in enumerate(self.questions):\n            # print(k, Q)\n            start = time.time()\n            q = (Q(working_directory=self.wdir), w)\n            q[0].t_init = time.time() - start\n            # if time.time() -start > 0.2:\n            #     raise Exception(Q, "Question takes to long to initialize. Use the init() function to set local variables instead")\n            # print(time.time()-start)\n            qs.append(q)\n        self.questions = qs\n        # self.questions = [(Q(working_directory=self.wdir),w) for Q,w in self.questions]\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        else:\n            if os.path.isfile(self.computed_answers_file):\n                self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n            else:\n                s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n                if strict:\n                    raise Exception(s)\n                else:\n                    print(s)\n\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            for item, _ in q.items:\n                if q.name not in payloads or item.name not in payloads[q.name]:\n                    s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n                    if strict:\n                        raise Exception(s)\n                    else:\n                        print(s)\n                else:\n                    item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n                    item.estimated_time = payloads[q.name][item.name].get("time", 1) #"[\'time\']\n                    q.estimated_time = payloads[q.name].get("time", 1)\n                    if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n                        item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n                    try:\n                        if "title" in payloads[q.name][item.name]:\n                            item.title = payloads[q.name][item.name][\'title\']\n                    except Exception as e:\n                        pass\n                        print("bad", e)\n        self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n\n\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n\n    def terminate(self):\n\n\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n        # if self.pbar is not None:\n        #     self.pbar.close()\n        #     self.pbar = None\n        # for _ in tqdm.tqdm(range(n), file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100, bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\'): #, unit_scale=dt, unit=\'seconds\'):\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\n# import unitgrade\n\n# from unitgrade.unitgrade import Hidden\n# import unitgrade as ug\n# import unitgrade.unitgrade as ug\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n#from threading import Thread  # This import presents a problem for the minify-code compression tool.\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n# parser.add_argument(\'integers\', metavar=\'N\', type=int, nargs=\'+\',\n#                     help=\'an integer for the accumulator\')\n# parser.add_argument(\'--sum\', dest=\'accumulate\', action=\'store_const\',\n#                     const=sum, default=max,\n#                     help=\'sum the integers (default: find the max)\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n    try:  # For registering stats.\n        import irlc.lectures\n        import xlwings\n        from openpyxl import Workbook\n        import pandas as pd\n        from collections import defaultdict\n        dd = defaultdict(lambda: [])\n        error_computed = []\n        for k1, (q, _) in enumerate(report.questions):\n            for k2, (item, _) in enumerate(q.items):\n                dd[\'question_index\'].append(k1)\n                dd[\'item_index\'].append(k2)\n                dd[\'question\'].append(q.name)\n                dd[\'item\'].append(item.name)\n                dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n                error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n\n        qstats = report.wdir + "/" + report.name + ".xlsx"\n\n        if os.path.isfile(qstats):\n            d_read = pd.read_excel(qstats).to_dict()\n        else:\n            d_read = dict()\n\n        for k in range(1000):\n            key = \'run_\'+str(k)\n            if key in d_read:\n                dd[key] = list(d_read[\'run_0\'].values())\n            else:\n                dd[key] = error_computed\n                break\n\n        workbook = Workbook()\n        worksheet = workbook.active\n        for col, key in enumerate(dd.keys()):\n            worksheet.cell(row=1, column=col+1).value = key\n            for row, item in enumerate(dd[key]):\n                worksheet.cell(row=row+2, column=col+1).value = item\n\n        workbook.save(qstats)\n        workbook.close()\n\n    except ModuleNotFoundError as e:\n        s = 234\n        pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n    for n, (q, w) in enumerate(report.questions):\n        q_hidden = issubclass(q.__class__, Hidden)\n        # report.globals = q.globals\n        # q.globals = report.globals\n        if question is not None and n+1 != question:\n            continue\n\n        # Don\'t use f format strings.\n        q_title_print = "Question %i: %s"%(n+1, q.title)\n        print(q_title_print, end="")\n        # sys.stdout.flush()\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # Active progress bar.\n\n        for j, (item, iw) in enumerate(q.items):\n            if qitem is not None and question is not None and item is not None and j+1 != qitem:\n                continue\n            if not q.has_called_init_:\n                start = time.time()\n\n                cc = None\n                if show_progress_bar:\n                    cc = ActiveProgress(t=q.estimated_time, title=q_title_print)\n                with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n                    try:\n                        q.init()  # Initialize the question. 
Useful for sharing resources.\n                    except Exception as e:\n                        if not passall:\n                            if not silent:\n                                print(" ")\n                                print("="*30)\n                                print(f"When initializing question {q.title} the initialization code threw an error")\n                                print(e)\n                                print("The remaining parts of this question will likely fail.")\n                                print("="*30)\n\n                if show_progress_bar:\n                    cc.terminate()\n                    sys.stdout.flush()\n                    print(q_title_print, end="")\n\n                q.has_called_init_ = True\n                q_time =np.round(  time.time()-start, 2)\n\n                print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n                print("=" * nL)\n\n            item.question = q # Set the parent question instance for later reference.\n            item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n\n            if show_progress_bar:\n                cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n            else:\n                print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n            hidden = issubclass(item.__class__, Hidden)\n            # if not hidden:\n            #     print(ss, end="")\n            # sys.stdout.flush()\n            start = time.time()\n            (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n            q_[j] = {\'w\': iw, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n            tsecs = np.round(time.time()-start, 2)\n            if show_progress_bar:\n                cc.terminate()\n                sys.stdout.flush()\n                print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n\n            if not hidden:\n                ss = "PASS" if current == possible else "*** FAILED"\n                if tsecs >= 0.1:\n                    ss += " ("+ str(tsecs) + " seconds)"\n                print(ss)\n\n        ws, possible, obtained = upack(q_)\n        possible = int(ws @ possible)\n        obtained = int(ws @ obtained)\n        obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'hidden\': q_hidden, \'title\': q.title}\n\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n 
   table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    top_package = os.path.dirname(top_package)\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for m in report.pack_imports:\n            nimp  = gather_imports(m)\n            if len([k for k in nimp if k not in sources]) > 0:\n                print(f"*** {m.__name__}")\n            sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    bzwrite(json_str, token)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.1.8"\n\nfrom cs101courseware_example import homework1\n\nclass ListReversalQuestion(QuestionGroup):\n    title = "Reversal 
of list"\n\n    class ListReversalItem(QPrintItem):\n        l = [1, 3, 5, 1, 610]\n        def compute_answer_print(self):\n            from cs101courseware_example.homework1 import reverse_list\n            return reverse_list(self.l)\n\n    class ListReversalWordsItem(ListReversalItem):\n        l = ["hello", "world", "summer", "dog"]\n\nclass LinearRegressionQuestion(QuestionGroup):\n    title = "Linear regression and Boston dataset"\n    class CoefficientsItem(QPrintItem):\n        testfun = QPrintItem.assertL2\n        tol = 0.03\n\n        def compute_answer_print(self):\n            from cs101courseware_example.homework1 import boston_linear\n            boston_linear()\n\n        def process_output(self, res, txt, numbers):\n            return numbers[:-1]\n\n    class RMSEItem(CoefficientsItem):\n        def process_output(self, res, txt, numbers):\n            return numbers[-1]\n\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(ListReversalQuestion, 5), (LinearRegressionQuestion, 13)]\n    pack_imports = [homework1] # Include this file in .token file'
+iFlPxdyCqBrgMNDzVoEL='import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade_v1.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    # _precomputed_title = None\n\n    def __init__(self, working_directory=None, correct_answer_payload=None, question=None, 
*args, **kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        self._correct_answer_payload = correct_answer_payload\n        self.question = question\n        # self.a = "not set"\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    # def get_title(self):\n    #     Overwrite this to compute a post-computed title.\n    #     return None\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print("Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    
print("\\n=================================================================================")\n                    print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Graph search"\n    items = None\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n\n    def __init__(self, *args, **kwargs):\n\n        self.name = self.__class__.__name__\n        if self.items is None:\n            self.items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for gt in members:\n                self.items.append( (gt, 1) )\n        self.items = [(I(question=self), w) for I, w in self.items]\n        self.has_called_init_ = False\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(inspect.getfile(type(self))))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        import time\n        qs = [] # Has to accumulate to new array otherwise the setup/evaluation steps cannot be run in sequence.\n        for k, (Q, w) in enumerate(self.questions):\n            # print(k, Q)\n            start = time.time()\n            q = (Q(working_directory=self.wdir), w)\n            q[0].t_init = time.time() - start\n            # if time.time() -start > 0.2:\n            #     raise Exception(Q, "Question takes to long to initialize. Use the init() function to set local variables instead")\n            # print(time.time()-start)\n            qs.append(q)\n        self.questions = qs\n        # self.questions = [(Q(working_directory=self.wdir),w) for Q,w in self.questions]\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        else:\n            if os.path.isfile(self.computed_answers_file):\n                self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n            else:\n                s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n                if strict:\n                    raise Exception(s)\n                else:\n                    print(s)\n\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            for item, _ in q.items:\n                if q.name not in payloads or item.name not in payloads[q.name]:\n                    s = f"> Broken resource dictionary submitted to unitgrade_v1 for question {q.name} and subquestion {item.name}. Framework will not work."\n                    if strict:\n                        raise Exception(s)\n                    else:\n                        print(s)\n                else:\n                    item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n                    item.estimated_time = payloads[q.name][item.name].get("time", 1) #"[\'time\']\n                    q.estimated_time = payloads[q.name].get("time", 1)\n                    if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n                        item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n                    try:\n                        if "title" in payloads[q.name][item.name]:\n                            item.title = payloads[q.name][item.name][\'title\']\n                    except Exception as e:\n                        pass\n                        print("bad", e)\n        self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n\n\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n\n    def terminate(self):\n\n\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n        # if self.pbar is not None:\n        #     self.pbar.close()\n        #     self.pbar = None\n        # for _ in tqdm.tqdm(range(n), file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100, bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\'): #, unit_scale=dt, unit=\'seconds\'):\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\n# import unitgrade_v1\n\n# from unitgrade_v1.unitgrade_v1 import Hidden\n# import unitgrade_v1 as ug\n# import unitgrade_v1.unitgrade_v1 as ug\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n#from threading import Thread  # This import presents a problem for the minify-code compression tool.\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n# parser.add_argument(\'integers\', metavar=\'N\', type=int, nargs=\'+\',\n#                     help=\'an integer for the accumulator\')\n# parser.add_argument(\'--sum\', dest=\'accumulate\', action=\'store_const\',\n#                     const=sum, default=max,\n#                     help=\'sum the integers (default: find the max)\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n    try:  # For registering stats.\n        import irlc.lectures\n        import xlwings\n        from openpyxl import Workbook\n        import pandas as pd\n        from collections import defaultdict\n        dd = defaultdict(lambda: [])\n        error_computed = []\n        for k1, (q, _) in enumerate(report.questions):\n            for k2, (item, _) in enumerate(q.items):\n                dd[\'question_index\'].append(k1)\n                dd[\'item_index\'].append(k2)\n                dd[\'question\'].append(q.name)\n                dd[\'item\'].append(item.name)\n                dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n                error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n\n        qstats = report.wdir + "/" + report.name + ".xlsx"\n\n        if os.path.isfile(qstats):\n            d_read = pd.read_excel(qstats).to_dict()\n        else:\n            d_read = dict()\n\n        for k in range(1000):\n            key = \'run_\'+str(k)\n            if key in d_read:\n                dd[key] = list(d_read[\'run_0\'].values())\n            else:\n                dd[key] = error_computed\n                break\n\n        workbook = Workbook()\n        worksheet = workbook.active\n        for col, key in enumerate(dd.keys()):\n            worksheet.cell(row=1, column=col+1).value = key\n            for row, item in enumerate(dd[key]):\n                worksheet.cell(row=row+2, column=col+1).value = item\n\n        workbook.save(qstats)\n        workbook.close()\n\n    except ModuleNotFoundError as e:\n        s = 234\n        pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n    for n, (q, w) in enumerate(report.questions):\n        q_hidden = issubclass(q.__class__, Hidden)\n        # report.globals = q.globals\n        # q.globals = report.globals\n        if question is not None and n+1 != question:\n            continue\n\n        # Don\'t use f format strings.\n        q_title_print = "Question %i: %s"%(n+1, q.title)\n        print(q_title_print, end="")\n        # sys.stdout.flush()\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # Active progress bar.\n\n        for j, (item, iw) in enumerate(q.items):\n            if qitem is not None and question is not None and item is not None and j+1 != qitem:\n                continue\n            if not q.has_called_init_:\n                start = time.time()\n\n                cc = None\n                if show_progress_bar:\n                    cc = ActiveProgress(t=q.estimated_time, title=q_title_print)\n                with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n                    try:\n                        q.init()  # Initialize the question. 
Useful for sharing resources.\n                    except Exception as e:\n                        if not passall:\n                            if not silent:\n                                print(" ")\n                                print("="*30)\n                                print(f"When initializing question {q.title} the initialization code threw an error")\n                                print(e)\n                                print("The remaining parts of this question will likely fail.")\n                                print("="*30)\n\n                if show_progress_bar:\n                    cc.terminate()\n                    sys.stdout.flush()\n                    print(q_title_print, end="")\n\n                q.has_called_init_ = True\n                q_time =np.round(  time.time()-start, 2)\n\n                print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n                print("=" * nL)\n\n            item.question = q # Set the parent question instance for later reference.\n            item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n\n            if show_progress_bar:\n                cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n            else:\n                print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n            hidden = issubclass(item.__class__, Hidden)\n            # if not hidden:\n            #     print(ss, end="")\n            # sys.stdout.flush()\n            start = time.time()\n            (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n            q_[j] = {\'w\': iw, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n            tsecs = np.round(time.time()-start, 2)\n            if show_progress_bar:\n                cc.terminate()\n                sys.stdout.flush()\n                print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n\n            if not hidden:\n                ss = "PASS" if current == possible else "*** FAILED"\n                if tsecs >= 0.1:\n                    ss += " ("+ str(tsecs) + " seconds)"\n                print(ss)\n\n        ws, possible, obtained = upack(q_)\n        possible = int(ws @ possible)\n        obtained = int(ws @ obtained)\n        obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'hidden\': q_hidden, \'title\': q.title}\n\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n 
   table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    top_package = os.path.dirname(top_package)\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for m in report.pack_imports:\n            nimp  = gather_imports(m)\n            if len([k for k in nimp if k not in sources]) > 0:\n                print(f"*** {m.__name__}")\n            sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    bzwrite(json_str, token)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.1.8"\n\nfrom cs101courseware_example import homework1\n\nclass ListReversalQuestion(QuestionGroup):\n    title = "Reversal 
of list"\n\n    class ListReversalItem(QPrintItem):\n        l = [1, 3, 5, 1, 610]\n        def compute_answer_print(self):\n            from cs101courseware_example.homework1 import reverse_list\n            return reverse_list(self.l)\n\n    class ListReversalWordsItem(ListReversalItem):\n        l = ["hello", "world", "summer", "dog"]\n\nclass LinearRegressionQuestion(QuestionGroup):\n    title = "Linear regression and Boston dataset"\n    class CoefficientsItem(QPrintItem):\n        testfun = QPrintItem.assertL2\n        tol = 0.03\n\n        def compute_answer_print(self):\n            from cs101courseware_example.homework1 import boston_linear\n            boston_linear()\n\n        def process_output(self, res, txt, numbers):\n            return numbers[:-1]\n\n    class RMSEItem(CoefficientsItem):\n        def process_output(self, res, txt, numbers):\n            return numbers[-1]\n\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(ListReversalQuestion, 5), (LinearRegressionQuestion, 13)]\n    pack_imports = [homework1] # Include this file in .token file'
 iFlPxdyCqBrgMNDzVoEA='80049512020000000000007d94288c144c697374526576657273616c5175657374696f6e947d94288c0474696d65944700000000000000008c104c697374526576657273616c4974656d947d94288c077061796c6f6164945d94284d62024b014b054b034b01658c0b707265636f6d7075746564944e68034700000000000000008c057469746c65948c104c697374526576657273616c4974656d94758c154c697374526576657273616c576f7264734974656d947d942868065d94288c03646f67948c0673756d6d6572948c05776f726c64948c0568656c6c6f946568084e680347000000000000000068098c154c697374526576657273616c576f7264734974656d9475758c184c696e65617252656772657373696f6e5175657374696f6e947d942868034700000000000000008c10436f656666696369656e74734974656d947d942868065d942847bfbbad207494a76c473fa7c5437cbda6fb473f951ff08b42e9b547400582027fe20d7c47c031c19fcb0c026d47400e7c9dd6eb08cd473f47049a406460ce47bff79a05a5307b9e473fd396c01163bbcd47bf8944115fa064ea47bfee7c69c063070b473f8318255bc9455447bfe0ca713e6cde616568084e6803473f8f2b200000000068098c10436f656666696369656e74734974656d94758c08524d53454974656d947d94286806474012b794bed3e1f168084e6803473f9ec3e00000000068098c08524d53454974656d947575752e'
 iFlPxdyCqBrgMNDzVoEu="Report1"
 iFlPxdyCqBrgMNDzVofu=iFlPxdyCqBrgMNDzVoEH(iFlPxdyCqBrgMNDzVoEu,iFlPxdyCqBrgMNDzVoEL,iFlPxdyCqBrgMNDzVoEA)
diff --git a/docs/legacy/cs101courseware_example/cs101report2.py b/docs/legacy/cs101courseware_example/cs101report2.py
index a3d8691..eb78e8a 100644
--- a/docs/legacy/cs101courseware_example/cs101report2.py
+++ b/docs/legacy/cs101courseware_example/cs101report2.py
@@ -1,4 +1,4 @@
-from unitgrade.unitgrade import QuestionGroup, Report, QPrintItem, Hidden
+from unitgrade_v1.unitgrade import QuestionGroup, Report, QPrintItem, Hidden
 
 class ListReversalQuestion(QuestionGroup):
     title = "Reversal of list"
@@ -37,8 +37,8 @@ class Report2(Report):
     pack_imports = [] # Include this file in .token file
 
 if __name__ == "__main__":
-    # from unitgrade_private.hidden_create_files import setup_answers, setup_grade_file_report
-    from unitgrade.unitgrade_helpers import evaluate_report_student
+    # from unitgrade_private_v1.hidden_create_files import setup_answers, setup_grade_file_report
+    from unitgrade_v1.unitgrade_helpers import evaluate_report_student
     # setup_grade_file_report(Report2, minify=True, bzip=True, obfuscate=True)
     # evaluate_report_student(Report2())
     evaluate_report_student(Report2())
diff --git a/docs/legacy/cs101courseware_example/instructions.py b/docs/legacy/cs101courseware_example/instructions.py
index 9786bd4..9bdad31 100644
--- a/docs/legacy/cs101courseware_example/instructions.py
+++ b/docs/legacy/cs101courseware_example/instructions.py
@@ -8,7 +8,7 @@ def tprint(s):
     print(f"\n> {s}\n")
 
 s = """
-This is an old script to illustrate unitgrade for a very simple, fictitious course. Because this is a build-in old, 
+This is an old script to illustrate unitgrade_v1 for a very simple, fictitious course. Because this is a built-in example, 
 the source code will likely reside in your site-packages directory. The full example can be found at:"""
 fprint(s)
 wdir = os.path.dirname(__file__)
diff --git a/docs/mkdocs.py b/docs/mkdocs.py
index 9e525e5..ca35e72 100644
--- a/docs/mkdocs.py
+++ b/docs/mkdocs.py
@@ -1,6 +1,6 @@
 import jinja2
 import os
-import unitgrade_private2
+import unitgrade_private
 import subprocess
 
 if __name__ == "__main__":
@@ -8,7 +8,7 @@ if __name__ == "__main__":
     bibtex = make_bibliography("../setup.py", "./")
 
     out = subprocess.check_output("python --version").decode("utf-8")
-    fn = unitgrade_private2.__path__[0] + "/../../examples/02631/instructor/programs/report1intro.py"
+    fn = unitgrade_private.__path__[0] + "/../../examples/02631/instructor/programs/report1intro.py"
 
     out = subprocess.check_output(f"cd {os.path.dirname(fn)} && python {os.path.basename(fn)} --noprogress", shell=True, encoding='utf8', errors='strict')
     out = out.replace("", "")
diff --git a/setup.py b/setup.py
index 0527345..713218c 100644
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,9 @@
 # Use this guide:
 # https://packaging.python.org/tutorials/packaging-projects/
 # py -m build && twine upload dist/*
-# from unitgrade2.version import __version__
+# from unitgrade.version import __version__
 import setuptools
-with open("src/unitgrade2/version.py", "r", encoding="utf-8") as fh:
+with open("src/unitgrade/version.py", "r", encoding="utf-8") as fh:
     __version__ = fh.read().split("=")[1].strip()[1:-1]
 # long_description = fh.read()
 
diff --git a/src/unitgrade/__init__.py b/src/unitgrade/__init__.py
index dacfb2d..3f78709 100644
--- a/src/unitgrade/__init__.py
+++ b/src/unitgrade/__init__.py
@@ -1,36 +1,44 @@
-import os
+import unitgrade.version as __version__
+from unitgrade.unitgrade import myround, mfloor, msum, ActiveProgress
+from unitgrade.unitgrade import Capturing, Report, UTestCase, cache, hide
 
-# DONT't import stuff here since install script requires __version__
-
-def cache_write(object, file_name, verbose=True):
-    import compress_pickle
-    dn = os.path.dirname(file_name)
-    if not os.path.exists(dn):
-        os.mkdir(dn)
-    if verbose: print("Writing cache...", file_name)
-    with open(file_name, 'wb', ) as f:
-        compress_pickle.dump(object, f, compression="lzma")
-    if verbose: print("Done!")
+from unitgrade.unitgrade_helpers import evaluate_report_student
 
+# import os
+# import lzma
+# import pickle
 
-def cache_exists(file_name):
-    # file_name = cn_(file_name) if cache_prefix else file_name
-    return os.path.exists(file_name)
-
+# Don't import stuff here, since the install script requires __version__
 
-def cache_read(file_name):
-    import compress_pickle # Import here because if you import in top the __version__ tag will fail.
-    # file_name = cn_(file_name) if cache_prefix else file_name
-    if os.path.exists(file_name):
-        try:
-            with open(file_name, 'rb') as f:
-                return compress_pickle.load(f, compression="lzma")
-        except Exception as e:
-            print("Tried to load a bad pickle file at", file_name)
-            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")
-            print(e)
-            # return pickle.load(f)
-    else:
-        return None
+# def cache_write(object, file_name, verbose=True):
+#     # raise Exception("bad")
+#     # import compress_pickle
+#     dn = os.path.dirname(file_name)
+#     if not os.path.exists(dn):
+#         os.mkdir(dn)
+#     if verbose: print("Writing cache...", file_name)
+#     with lzma.open(file_name, 'wb', ) as f:
+#         pickle.dump(object, f)
+#     if verbose: print("Done!")
+#
+#
+# def cache_exists(file_name):
+#     # file_name = cn_(file_name) if cache_prefix else file_name
+#     return os.path.exists(file_name)
+#
+#
+# def cache_read(file_name):
+#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.
+#     # file_name = cn_(file_name) if cache_prefix else file_name
+#     if os.path.exists(file_name):
+#         try:
+#             with lzma.open(file_name, 'rb') as f:
+#                 return pickle.load(f)
+#         except Exception as e:
+#             print("Tried to load a bad pickle file at", file_name)
+#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")
+#             print(e)
+#             # return pickle.load(f)
+#     else:
+#         return None
 
-from unitgrade.unitgrade import Hidden, myround, mfloor, msum, Capturing, ActiveProgress
diff --git a/src/unitgrade/unitgrade.py b/src/unitgrade/unitgrade.py
index a56b4a7..7357532 100644
--- a/src/unitgrade/unitgrade.py
+++ b/src/unitgrade/unitgrade.py
@@ -1,37 +1,43 @@
-"""
-git add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade
-
-"""
-from . import cache_read
-import unittest
 import numpy as np
-import os
 import sys
-from io import StringIO
-import collections
-import inspect
 import re
 import threading
 import tqdm
+import pickle
+import os
+from io import StringIO
+import io
+from unittest.runner import _WritelnDecorator
+from functools import _make_key, RLock
+from typing import Any
+import inspect
+import colorama
+from colorama import Fore
+from collections import namedtuple
+import unittest
 import time
+import textwrap
+
+_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
+
+colorama.init(autoreset=True)  # auto resets your settings after every output
+
+def gprint(s):
+    print(f"{Fore.GREEN}{s}")
 
 myround = lambda x: np.round(x)  # required.
 msum = lambda x: sum(x)
 mfloor = lambda x: np.floor(x)
 
-def setup_dir_by_class(C,base_dir):
+
+def setup_dir_by_class(C, base_dir):
     name = C.__class__.__name__
-    # base_dir = os.path.join(base_dir, name)
-    # if not os.path.isdir(base_dir):
-    #     os.makedirs(base_dir)
     return base_dir, name
 
-class Hidden:
-    def hide(self):
-        return True
 
 class Logger(object):
     def __init__(self, buffer):
+        assert False
         self.terminal = sys.stdout
         self.log = buffer
 
@@ -43,13 +49,15 @@ class Logger(object):
         # this flush method is needed for python 3 compatibility.
         pass
 
+
 class Capturing(list):
-    def __init__(self, *args, unmute=False, **kwargs):
+    def __init__(self, *args, stdout=None, unmute=False, **kwargs):
+        self._stdout = stdout
         self.unmute = unmute
         super().__init__(*args, **kwargs)
 
-    def __enter__(self, capture_errors=True): # don't put arguments here.
-        self._stdout = sys.stdout
+    def __enter__(self, capture_errors=True):  # don't put arguments here.
+        self._stdout = sys.stdout if self._stdout == None else self._stdout
         self._stringio = StringIO()
         if self.unmute:
             sys.stdout = Logger(self._stringio)
@@ -58,290 +66,112 @@ class Capturing(list):
 
         if capture_errors:
             self._sterr = sys.stderr
-            sys.sterr = StringIO() # memory hole it
+            sys.stderr = StringIO()  # memory hole it
         self.capture_errors = capture_errors
         return self
 
     def __exit__(self, *args):
         self.extend(self._stringio.getvalue().splitlines())
-        del self._stringio    # free up some memory
+        del self._stringio  # free up some memory
         sys.stdout = self._stdout
         if self.capture_errors:
             sys.stderr = self._sterr
 
 
-class QItem(unittest.TestCase):
-    title = None
-    testfun = None
-    tol = 0
-    estimated_time = 0.42
-    _precomputed_payload = None
-    _computed_answer = None # Internal helper to later get results.
-    weight = 1 # the weight of the question.
+class Capturing2(Capturing):
+    def __exit__(self, *args):
+        lines = self._stringio.getvalue().splitlines()
+        txt = "\n".join(lines)
+        numbers = extract_numbers(txt)
+        self.extend(lines)
+        del self._stringio  # free up some memory
+        sys.stdout = self._stdout
+        if self.capture_errors:
+            sys.stderr = self._sterr
 
-    def __init__(self, question=None, *args, **kwargs):
-        if self.tol > 0 and self.testfun is None:
-            self.testfun = self.assertL2Relative
-        elif self.testfun is None:
-            self.testfun = self.assertEqual
+        self.output = txt
+        self.numbers = numbers
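+
+    # Usage sketch: after
+    #     with Capturing2() as c:
+    #         print("error = 0.25")
+    # c.output holds the captured text ("error = 0.25") and c.numbers the
+    # numbers extracted from it ([0.25]).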
 
-        self.name = self.__class__.__name__
-        # self._correct_answer_payload = correct_answer_payload
-        self.question = question
 
-        super().__init__(*args, **kwargs)
-        if self.title is None:
-            self.title = self.name
-
-    def _safe_get_title(self):
-        if self._precomputed_title is not None:
-            return self._precomputed_title
-        return self.title
-
-    def assertNorm(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )
-        nrm = np.sqrt(np.sum( diff ** 2))
-
-        self.error_computed = nrm
-
-        if nrm > tol:
-            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
-
-    def assertL2(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
-        self.error_computed = np.max(diff)
-
-        if np.max(diff) > tol:
-            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")
-
-    def assertL2Relative(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
-        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )
-        self.error_computed = np.max(np.abs(diff))
-        if np.sum(diff > tol) > 0:
-            print(f"Not equal within tolerance {tol}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
-
-    def precomputed_payload(self):
-        return self._precomputed_payload
-
-    def precompute_payload(self):
-        # Pre-compute resources to include in tests (useful for getting around rng).
-        pass
+class Report:
+    title = "report title"
+    version = None
+    questions = []
+    pack_imports = []
+    individual_imports = []
+    nL = 120  # Maximum line width
+    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.
 
-    def compute_answer(self, unmute=False):
-        raise NotImplementedError("test code here")
-
-    def test(self, computed, expected):
-        self.testfun(computed, expected)
-
-    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):
-        possible = 1
-        computed = None
-        def show_computed_(computed):
-            print(">>> Your output:")
-            print(computed)
-
-        def show_expected_(expected):
-            print(">>> Expected output (note: may have been processed; read text script):")
-            print(expected)
-
-        correct = self._correct_answer_payload
-        try:
-            if unmute: # Required to not mix together print stuff.
-                print("")
-            computed = self.compute_answer(unmute=unmute)
-        except Exception as e:
-            if not passall:
-                if not silent:
-                    print("\n=================================================================================")
-                    print(f"When trying to run test class '{self.name}' your code threw an error:", e)
-                    show_expected_(correct)
-                    import traceback
-                    print(traceback.format_exc())
-                    print("=================================================================================")
-                return (0, possible)
-
-        if self._computed_answer is None:
-            self._computed_answer = computed
-
-        if show_expected or show_computed:
-            print("\n")
-        if show_expected:
-            show_expected_(correct)
-        if show_computed:
-            show_computed_(computed)
-        try:
-            if not passall:
-                self.test(computed=computed, expected=correct)
-        except Exception as e:
-            if not silent:
-                print("\n=================================================================================")
-                print(f"Test output from test class '{self.name}' does not match expected result. Test error:")
-                print(e)
-                show_computed_(computed)
-                show_expected_(correct)
-            return (0, possible)
-        return (1, possible)
-
-    def score(self):
-        try:
-            self.test()
-        except Exception as e:
-            return 0
-        return 1
-
-class QPrintItem(QItem):
-    def compute_answer_print(self):
-        """
-        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values
-        are send to process_output (see compute_answer below). In other words, the text generated is:
-
-        res = compute_Answer_print()
-        txt = (any terminal output generated above)
-        numbers = (any numbers found in terminal-output txt)
-
-        self.test(process_output(res, txt, numbers), <expected result>)
-
-        :return: Optional values for comparison
-        """
-        raise Exception("Generate output here. The output is passed to self.process_output")
-
-    def process_output(self, res, txt, numbers):
-        return res
-
-    def compute_answer(self, unmute=False):
-        with Capturing(unmute=unmute) as output:
-            res = self.compute_answer_print()
-        s = "\n".join(output)
-        s = rm_progress_bar(s) # Remove progress bar.
-        numbers = extract_numbers(s)
-        self._computed_answer = (res, s, numbers)
-        return self.process_output(res, s, numbers)
-
-class OrderedClassMembers(type):
     @classmethod
-    def __prepare__(self, name, bases):
-        return collections.OrderedDict()
-    def __new__(self, name, bases, classdict):
-        ks = list(classdict.keys())
-        for b in bases:
-            ks += b.__ordered__
-        classdict['__ordered__'] = [key for key in ks if key not in ('__module__', '__qualname__')]
-        return type.__new__(self, name, bases, classdict)
-
-class QuestionGroup(metaclass=OrderedClassMembers):
-    title = "Untitled question"
-    partially_scored = False
-    t_init = 0  # Time spend on initialization (placeholder; set this externally).
-    estimated_time = 0.42
-    has_called_init_ = False
-    _name = None
-    _items = None
-
-    @property
-    def items(self):
-        if self._items == None:
-            self._items = []
-            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]
-            for I in members:
-                self._items.append( I(question=self))
-        return self._items
-
-    @items.setter
-    def items(self, value):
-        self._items = value
-
-    @property
-    def name(self):
-        if self._name == None:
-            self._name = self.__class__.__name__
-        return self._name #
+    def reset(cls):
+        for (q, _) in cls.questions:
+            if hasattr(q, 'reset'):
+                q.reset()
 
-    @name.setter
-    def name(self, val):
-        self._name = val
-
-    def init(self):
-        # Can be used to set resources relevant for this question instance.
-        pass
+    @classmethod
+    def mfile(cls):
+        return inspect.getfile(cls)
 
-    def init_all_item_questions(self):
-        for item in self.items:
-            if not item.question.has_called_init_:
-                item.question.init()
-                item.question.has_called_init_ = True
+    def _file(self):
+        return inspect.getfile(type(self))
 
+    def _import_base_relative(self):
+        if hasattr(self.pack_imports[0], '__path__'):
+            root_dir = self.pack_imports[0].__path__._path[0]
+        else:
+            root_dir = self.pack_imports[0].__file__
 
-class Report():
-    title = "report title"
-    version = None
-    questions = []
-    pack_imports = []
-    individual_imports = []
+        root_dir = os.path.dirname(root_dir)
+        relative_path = os.path.relpath(self._file(), root_dir)
+        modules = os.path.normpath(relative_path[:-3]).split(os.sep)
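+        # Illustrative example (hypothetical names): with pack_imports=[homework1],
+        # where homework1.py and this report file report1.py share a directory <dir>,
+        # this returns (<dir>, 'report1.py', ['report1']).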
+        return root_dir, relative_path, modules
 
     def __init__(self, strict=False, payload=None):
-        working_directory = os.path.abspath(os.path.dirname(inspect.getfile(type(self))))
+        working_directory = os.path.abspath(os.path.dirname(self._file()))
         self.wdir, self.name = setup_dir_by_class(self, working_directory)
-        self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")
-        import time
-        qs = [] # Has to accumulate to new array otherwise the setup/evaluation steps cannot be run in sequence.
-        for k, (Q, w) in enumerate(self.questions):
-            start = time.time()
-            q = Q()
-            q.t_init = time.time() - start
-            for k, i in enumerate(q.items):
-                i.name = i.name + "_" + str(k)
-            qs.append((q, w))
-
-        self.questions = qs
+        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")
+        for (q, _) in self.questions:
+            q.nL = self.nL  # Set maximum line length.
+
         if payload is not None:
             self.set_payload(payload, strict=strict)
-        else:
-            if os.path.isfile(self.computed_answers_file):
-                self.set_payload(cache_read(self.computed_answers_file), strict=strict)
-            else:
-                s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."
-                if strict:
-                    raise Exception(s)
-                else:
-                    print(s)
 
+    def main(self, verbosity=1):
+        # Run all tests using standard unittest (nothing fancy).
+        loader = unittest.TestLoader()
+        for q, _ in self.questions:
+            start = time.time()  # A good proxy for setup time is to time the full test run.
+            suite = loader.loadTestsFromTestCase(q)
+            unittest.TextTestRunner(verbosity=verbosity).run(suite)
+            total = time.time() - start
+            q.time = total
+
+    def _setup_answers(self, with_coverage=False):
+        if with_coverage:
+            for q, _ in self.questions:
+                q._with_coverage = True
+                q._report = self
+
+        self.main()  # Run all tests in class just to get that out of the way...
+        report_cache = {}
+        for q, _ in self.questions:
+            # print(self.questions)
+            if hasattr(q, '_save_cache'):
+                q()._save_cache()
+                print("q is", q())
+                q()._cache_put('time', q.time) # = q.time
+                report_cache[q.__qualname__] = q._cache2
+            else:
+                report_cache[q.__qualname__] = {'no cache see _setup_answers in unitgrade.py': True}
+        if with_coverage:
+            for q, _ in self.questions:
+                q._with_coverage = False
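+        # The returned layout is {<question __qualname__>: <its _cache2 dict>, ...},
+        # e.g. {'Week1': {...}} for a hypothetical question class Week1.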
+        return report_cache
 
     def set_payload(self, payloads, strict=False):
         for q, _ in self.questions:
-            for item in q.items:
-                if q.name not in payloads or item.name not in payloads[q.name]:
-                    s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."
-                    if strict:
-                        raise Exception(s)
-                    else:
-                        print(s)
-                else:
-                    item._correct_answer_payload = payloads[q.name][item.name]['payload']
-                    item.estimated_time = payloads[q.name][item.name].get("time", 1)
-                    q.estimated_time = payloads[q.name].get("time", 1)
-                    if "precomputed" in payloads[q.name][item.name]: # Consider removing later.
-                        item._precomputed_payload = payloads[q.name][item.name]['precomputed']
-                    try:
-                        if "title" in payloads[q.name][item.name]: # can perhaps be removed later.
-                            item.title = payloads[q.name][item.name]['title']
-                    except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).
-                        pass
-                        # print("bad", e)
-        self.payloads = payloads
-
+            q._cache = payloads[q.__qualname__]
+        self._config = payloads['config']
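+        # Hence payloads is expected to look like
+        #     {'Week1': {...cache entries...}, ..., 'config': {...}}
+        # with one key per question __qualname__ plus a 'config' entry
+        # ('Week1' is a hypothetical name).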
 
 def rm_progress_bar(txt):
     # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.
@@ -350,61 +180,68 @@ def rm_progress_bar(txt):
         pct = l.find("%")
         ql = False
         if pct > 0:
-            i = l.find("|", pct+1)
-            if i > 0 and l.find("|", i+1) > 0:
+            i = l.find("|", pct + 1)
+            if i > 0 and l.find("|", i + 1) > 0:
                 ql = True
         if not ql:
             nlines.append(l)
     return "\n".join(nlines)
 
+
 def extract_numbers(txt):
     # txt = rm_progress_bar(txt)
-    numeric_const_pattern = '[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
+    numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
     rx = re.compile(numeric_const_pattern, re.VERBOSE)
     all = rx.findall(txt)
     all = [float(a) if ('.' in a or "e" in a) else int(a) for a in all]
     if len(all) > 500:
         print(txt)
-        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))
+        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))
     return all
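+# e.g. extract_numbers("error = 0.25 after 3 iterations") returns [0.25, 3].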
 
 
 class ActiveProgress():
-    def __init__(self, t, start=True, title="my progress bar"):
+    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):
+        if file == None:
+            file = sys.stdout
+        self.file = file
         self.t = t
         self._running = False
         self.title = title
-        self.dt = 0.1
-
+        self.dt = 0.01
         self.n = int(np.round(self.t / self.dt))
-        # self.pbar = tqdm.tqdm(total=self.n)
-
+        self.show_progress_bar = show_progress_bar
+        self.pbar = None
 
         if start:
             self.start()
 
     def start(self):
         self._running = True
-        self.thread = threading.Thread(target=self.run)
-        self.thread.start()
+        if self.show_progress_bar:
+            self.thread = threading.Thread(target=self.run)
+            self.thread.start()
+        self.time_started = time.time()
 
     def terminate(self):
-
-
+        if not self._running:
+            raise Exception("Stopping a stopped progress bar. ")
         self._running = False
-        self.thread.join()
-        if hasattr(self, 'pbar') and self.pbar is not None:
+        if self.show_progress_bar:
+            self.thread.join()
+        if self.pbar is not None:
             self.pbar.update(1)
             self.pbar.close()
-            self.pbar=None
+            self.pbar = None
 
-        sys.stdout.flush()
+        self.file.flush()
+        return time.time() - self.time_started
 
     def run(self):
-        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,
-                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')  # , unit_scale=dt, unit='seconds'):
+        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,
+                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')
 
-        for _ in range(self.n-1): # Don't terminate completely; leave bar at 99% done until terminate.
+        for _ in range(self.n - 1):  # Don't terminate completely; leave bar at 99% done until terminate.
             if not self._running:
                 self.pbar.close()
                 self.pbar = None
@@ -412,3 +249,439 @@ class ActiveProgress():
 
             time.sleep(self.dt)
             self.pbar.update(1)
+
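+# Usage sketch (hypothetical values):
+#     cc = ActiveProgress(t=2.0, title="q1) my test")  # the bar starts immediately
+#     ...                                              # do the timed work
+#     elapsed = cc.terminate()                         # wall-clock seconds since start()
+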
+def dprint(first, last, nL, extra = "", file=None, dotsym='.', color='white'):
+    if file == None:
+        file = sys.stdout
+    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))
+    print(first + dot_parts, end="", file=file)
+    last += extra
+    print(last, file=file)
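+    # e.g. dprint("* q1.1) test_foo", "PASS", nL=30) prints
+    # "* q1.1) test_foo..........PASS" -- dots pad the line out to nL characters.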
+
+
+class UTextResult(unittest.TextTestResult):
+    nL = 80
+    number = -1  # Hacky way to set the question number.
+    show_progress_bar = True
+    cc = None
+
+    def __init__(self, stream, descriptions, verbosity):
+        super().__init__(stream, descriptions, verbosity)
+        self.successes = []
+
+    def printErrors(self) -> None:
+        self.printErrorList('ERROR', self.errors)
+        self.printErrorList('FAIL', self.failures)
+
+    def addError(self, test, err):
+        super(unittest.TextTestResult, self).addFailure(test, err)
+        self.cc_terminate(success=False)
+
+    def addFailure(self, test, err):
+        super(unittest.TextTestResult, self).addFailure(test, err)
+        self.cc_terminate(success=False)
+
+    def addSuccess(self, test: unittest.case.TestCase) -> None:
+        self.successes.append(test)
+        self.cc_terminate()
+
+    def cc_terminate(self, success=True):
+        if self.show_progress_bar or True:
+            tsecs = np.round(self.cc.terminate(), 2)
+            self.cc.file.flush()
+            ss = self.item_title_print
+
+            state = "PASS" if success else "FAILED"
+
+            dot_parts = ('.' * max(0, self.nL - len(state) - len(ss)))
+            if self.show_progress_bar or True:
+                print(self.item_title_print + dot_parts, end="", file=self.cc.file)
+            else:
+                print(dot_parts, end="", file=self.cc.file)
+
+            if tsecs >= 0.5:
+                state += " (" + str(tsecs) + " seconds)"
+            print(state, file=self.cc.file)
+
+    def startTest(self, test):
+        # j =self.testsRun
+        self.testsRun += 1
+        # item_title = self.getDescription(test)
+        item_title = test.shortDescription()  # Better for printing (get from cache).
+        if item_title is None:
+            # shortDescription() may return None; fall back to the full description.
+            item_title = self.getDescription(test)
+        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)
+        estimated_time = 10
+        if self.show_progress_bar or True:
+            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)
+        else:
+            print(self.item_title_print + ('.' * max(0, self.nL - 4 - len(self.item_title_print))), end="")
+
+        self._test = test
+        self._stdout = sys.stdout
+        sys.stdout = io.StringIO()
+
+    def stopTest(self, test):
+        sys.stdout = self._stdout
+        super().stopTest(test)
+
+    def _setupStdout(self):
+        if self._previousTestClass == None:
+            total_estimated_time = 1
+            if hasattr(self.__class__, 'q_title_print'):
+                q_title_print = self.__class__.q_title_print
+            else:
+                q_title_print = "<unnamed test. See unitgrade.py>"
+
+            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)
+            self.cc = cc
+
+    def _restoreStdout(self):  # Used when setting up the test.
+        if self._previousTestClass is None:
+            q_time = self.cc.terminate()
+            q_time = np.round(q_time, 2)
+            sys.stdout.flush()
+            if self.show_progress_bar:
+                print(self.cc.title, end="")
+            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))
+
+
+class UTextTestRunner(unittest.TextTestRunner):
+    def __init__(self, *args, **kwargs):
+        stream = io.StringIO()
+        super().__init__(*args, stream=stream, **kwargs)
+
+    def _makeResult(self):
+        # stream = self.stream # not you!
+        stream = sys.stdout
+        stream = _WritelnDecorator(stream)
+        return self.resultclass(stream, self.descriptions, self.verbosity)
+
+
+def cache(foo, typed=False):
+    """ Magic cache wrapper
+    https://github.com/python/cpython/blob/main/Lib/functools.py
+    """
+    maxsize = None
+    def wrapper(self, *args, **kwargs):
+        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))
+        if not self._cache_contains(key):
+            value = foo(self, *args, **kwargs)
+            self._cache_put(key, value)
+        else:
+            value = self._cache_get(key)
+        return value
+
+    return wrapper
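+
+# Usage sketch (hypothetical class): on a UTestCase, @cache memoizes the method's
+# return value in the student cache under (cache_id(), ("@cache", <name>, <arg key>)):
+#
+#     class Week1(UTestCase):
+#         @cache
+#         def long_computation(self, n):
+#             return n ** 2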
+
+
+def get_hints(ss):
+    if ss == None:
+        return None
+    try:
+        ss = textwrap.dedent(ss)
+        ss = ss.replace('''"""''', "").strip()
+        hints = ["hints:", "hint:"]
+        j = np.argmax([ss.lower().find(h) for h in hints])
+        h = hints[j]
+        ss = ss[ss.lower().find(h) + len(h) + 1:]
+        ss = "\n".join([l for l in ss.split("\n") if not l.strip().startswith(":")])
+        ss = textwrap.dedent(ss).strip()
+        return ss
+    except Exception as e:
+        print("bad hints", ss, e)
+
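+# Illustrative example: for a test docstring containing
+#     """ Compute the sum.
+#     Hints:
+#     Remember to loop over all elements.
+#     """
+# get_hints returns "Remember to loop over all elements."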
+
+class UTestCase(unittest.TestCase):
+    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.
+    _cache = None  # Read-only cache. Ensures method always produce same result.
+    _cache2 = None  # User-written cache.
+    _with_coverage = False
+    _covcache = None # Coverage cache. Written to if _with_coverage is true.
+    _report = None  # The report used. This is very, very hacky and should always be None. Don't rely on it!
+
+
+    def capture(self):
+        if hasattr(self, '_stdout') and self._stdout is not None:
+            file = self._stdout
+        else:
+            # self._stdout = sys.stdout
+            # sys._stdout = io.StringIO()
+            file = sys.stdout
+        return Capturing2(stdout=file)
+
+    @classmethod
+    def question_title(cls):
+        """ Return the question title """
+        return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__
+
+    @classmethod
+    def reset(cls):
+        print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")
+        cls._outcome = None
+        cls._cache = None
+        cls._cache2 = None
+
+    def _callSetUp(self):
+        if self._with_coverage:
+            if self._covcache is None:
+                self._covcache = {}
+            import coverage
+            self.cov = coverage.Coverage(data_file=None)
+            self.cov.start()
+        self.setUp()
+
+    def _callTearDown(self):
+        self.tearDown()
+        if self._with_coverage:
+            from pathlib import Path
+            from snipper import snipper_main
+            self.cov.stop()
+            data = self.cov.get_data()
+            base, _, _ = self._report._import_base_relative()
+            for file in data.measured_files():
+                file = os.path.normpath(file)
+                root = Path(base)
+                child = Path(file)
+                if root in child.parents:
+                    with open(child, 'r') as f:
+                        s = f.read()
+                    lines = s.splitlines()
+                    garb = 'GARBAGE'
+                    lines2 = snipper_main.censor_code(lines, keep=True)
+                    assert len(lines) == len(lines2)
+                    for l in data.contexts_by_lineno(file):
+                        if lines2[l].strip() == garb:
+                            rel = os.path.relpath(child, root)
+                            cc = self._covcache
+                            j = 0
+                            for j in range(l, -1, -1):
+                                if "def" in lines2[j] or "class" in lines2[j]:
+                                    break
+                            from snipper.legacy import gcoms
+                            fun = lines2[j]
+                            comments, _ = gcoms("\n".join(lines2[j:l]))
+                            if rel not in cc:
+                                cc[rel] = {}
+                            cc[rel][fun] = (l, "\n".join(comments))
+                            self._cache_put((self.cache_id(), 'coverage'), self._covcache)
+
+    def shortDescriptionStandard(self):
+        sd = super().shortDescription()
+        if sd is None:
+            sd = self._testMethodName
+        return sd
+
+    def shortDescription(self):
+        sd = self.shortDescriptionStandard()
+        title = self._cache_get((self.cache_id(), 'title'), sd)
+        return title if title is not None else sd
+
+    @property
+    def title(self):
+        return self.shortDescription()
+
+    @title.setter
+    def title(self, value):
+        self._cache_put((self.cache_id(), 'title'), value)
+
+    def _get_outcome(self):
+        if not hasattr(self.__class__, '_outcome') or self.__class__._outcome is None:
+            self.__class__._outcome = {}
+        return self.__class__._outcome
+
+    def _callTestMethod(self, testMethod):
+        t = time.time()
+        self._ensure_cache_exists()  # Make sure cache is there.
+        if self._testMethodDoc is not None:
+            self._cache_put((self.cache_id(), 'title'), self.shortDescriptionStandard())
+
+        self._cache2[(self.cache_id(), 'assert')] = {}
+        res = testMethod()
+        elapsed = time.time() - t
+        self._get_outcome()[self.cache_id()] = res
+        self._cache_put((self.cache_id(), "time"), elapsed)
+
+    def cache_id(self):
+        c = self.__class__.__qualname__
+        m = self._testMethodName
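+        # e.g. ('Week1', 'test_add') for a hypothetical test method Week1.test_add.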
+        return c, m
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._load_cache()
+        self._assert_cache_index = 0
+
+    def _ensure_cache_exists(self):
+        if not hasattr(self.__class__, '_cache') or self.__class__._cache == None:
+            self.__class__._cache = dict()
+        if not hasattr(self.__class__, '_cache2') or self.__class__._cache2 == None:
+            self.__class__._cache2 = dict()
+
+    def _cache_get(self, key, default=None):
+        self._ensure_cache_exists()
+        return self.__class__._cache.get(key, default)
+
+    def _cache_put(self, key, value):
+        self._ensure_cache_exists()
+        self.__class__._cache2[key] = value
+
+    def _cache_contains(self, key):
+        self._ensure_cache_exists()
+        return key in self.__class__._cache
+
+    def wrap_assert(self, assert_fun, first, *args, **kwargs):
+        # sys.stdout = self._stdout
+        key = (self.cache_id(), 'assert')
+        if not self._cache_contains(key):
+            print("Warning, framework missing", key)
+            # Insert a fresh dict directly; the assert bookkeeping below relies on it being mutable.
+            self.__class__._cache[key] = {}
+        cache = self._cache_get(key)
+        id = self._assert_cache_index
+        if id not in cache:
+            print("Warning, framework missing cache index", key, "id =", id)
+        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")
+
+        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.
+        cache[id] = first
+        self._cache_put(key, cache)
+        self._assert_cache_index += 1
+        assert_fun(first, _expected, *args, **kwargs)
+
+    def assertEqualC(self, first: Any, msg: Any = ...) -> None:
+        self.wrap_assert(self.assertEqual, first, msg)
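+
+    # Usage sketch (hypothetical test): consecutive calls are matched, in order,
+    # against cached entries 0, 1, ... under the key (cache_id(), 'assert'):
+    #     def test_add(self):
+    #         self.assertEqualC(add(2, 2))  # checked against cache entry 0
+    #         self.assertEqualC(add(3, 3))  # checked against cache entry 1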
+
+    def _cache_file(self):
+        return os.path.dirname(inspect.getabsfile(type(self))) + "/unitgrade_data/" + self.__class__.__name__ + ".pkl"
+
+    def _save_cache(self):
+        # get the class name (i.e. what to save to).
+        cfile = self._cache_file()
+        if not os.path.isdir(os.path.dirname(cfile)):
+            os.makedirs(os.path.dirname(cfile))
+
+        if hasattr(self.__class__, '_cache2'):
+            with open(cfile, 'wb') as f:
+                pickle.dump(self.__class__._cache2, f)
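+        # The cache file ends up in ./unitgrade_data/<ClassName>.pkl next to the
+        # test file (see _cache_file above).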
+
+    # But you can also set cache explicitly.
+    def _load_cache(self):
+        if self._cache is not None:  # Cache already loaded. We will not load it twice.
+            return
+            # raise Exception("Loaded cache which was already set. What is going on?!")
+        cfile = self._cache_file()
+        if os.path.exists(cfile):
+            try:
+                # print("\ncache file", cfile)
+                with open(cfile, 'rb') as f:
+                    data = pickle.load(f)
+                self.__class__._cache = data
+            except Exception as e:
+                print("Bad cache", cfile)
+                print(e)
+        else:
+            print("Warning! data file not found", cfile)
+
+    def _feedErrorsToResult(self, result, errors):
+        """ Use this to show hints on test failure. """
+        if not isinstance(result, UTextResult):
+            er = [e for e, v in errors if v != None]
+
+            if len(er) > 0:
+                hints = []
+                key = (self.cache_id(), 'coverage')
+                if self._cache_contains(key):
+                    CC = self._cache_get(key)
+                    cl, m = self.cache_id()
+                    gprint(f"> An error occured while solving: {cl}.{m}. The files/methods you need to edit are:")  # For the test {id} in {file} you should edit:")
+                    for file in CC:
+                        rec = CC[file]
+                        gprint(f">   * {file}")
+                        for l in rec:
+                            _, comments = CC[file][l]
+                            hint = get_hints(comments)
+
+                            if hint != None:
+                                hints.append((hint, file, l) )
+                            gprint(f">      - {l}")
+
+                er = er[0]
+                doc = er._testMethodDoc
+                if doc is not None:
+                    hint = get_hints(er._testMethodDoc)
+                    if hint is not None:
+                        hints = [(hint, None, self.cache_id()[1] )] + hints
+                if len(hints) > 0:
+                    # print(hints)
+                    for hint, file, method in hints:
+                        s = (f"'{method.strip()}'" if method is not None else "")
+                        if method is not None and file is not None:
+                            s += " in "
+                        try:
+                            s += (file.strip() if file is not None else "")
+                            gprint(">")
+                            gprint("> Hints (from " + s  + ")")
+                            gprint(textwrap.indent(hint, ">   "))
+                        except Exception as e:
+                            print("Bad stuff in hints. ")
+                            print(hints)
+
+        super()._feedErrorsToResult(result, errors)
+
+    def startTestRun(self):
+        super().startTestRun()
+
+
+def hide(func):
+    return func
+
+
+def makeRegisteringDecorator(foreignDecorator):
+    """
+        Returns a copy of foreignDecorator, which is identical in every
+        way(*), except also appends a .decorator property to the callable it
+        spits out.
+    """
+
+    def newDecorator(func):
+        # Call to newDecorator(method)
+        # Exactly like old decorator, but output keeps track of what decorated it
+        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done
+        R.decorator = newDecorator  # keep track of decorator
+        # R.original = func         # might as well keep track of everything!
+        return R
+
+    newDecorator.__name__ = foreignDecorator.__name__
+    newDecorator.__doc__ = foreignDecorator.__doc__
+    return newDecorator
+
+hide = makeRegisteringDecorator(hide)
+
+def methodsWithDecorator(cls, decorator):
+    """
+        Returns all methods in CLS with DECORATOR as the
+        outermost decorator.
+
+        DECORATOR must be a "registering decorator"; one
+        can make any decorator "registering" via the
+        makeRegisteringDecorator function.
+
+        import inspect
+        ls = list(methodsWithDecorator(GeneratorQuestion, deco))
+        for f in ls:
+            print(inspect.getsourcelines(f) ) # How to get all hidden questions.
+    """
+    for maybeDecorated in cls.__dict__.values():
+        if hasattr(maybeDecorated, 'decorator'):
+            if maybeDecorated.decorator == decorator:
+                yield maybeDecorated
\ No newline at end of file
diff --git a/src/unitgrade/unitgrade_helpers.py b/src/unitgrade/unitgrade_helpers.py
index 06d2e37..607ced9 100644
--- a/src/unitgrade/unitgrade_helpers.py
+++ b/src/unitgrade/unitgrade_helpers.py
@@ -2,20 +2,14 @@ import numpy as np
 from tabulate import tabulate
 from datetime import datetime
 import pyfiglet
-from unitgrade import Hidden, myround, msum, ActiveProgress
-# import unitgrade
-
-# from unitgrade.unitgrade import Hidden
-# import unitgrade as ug
-# import unitgrade.unitgrade as ug
+from unitgrade import msum
+import unittest
+from unitgrade.unitgrade import UTextResult
 import inspect
 import os
 import argparse
-import sys
 import time
 
-#from threading import Thread  # This import presents a problem for the minify-code compression tool.
-
 parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: 
 To run all tests in a report: 
 
@@ -42,9 +36,13 @@ parser.add_argument('--showexpected',  action="store_true",  help='Show the expe
 parser.add_argument('--showcomputed',  action="store_true",  help='Show the answer your code computes')
 parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
 parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
+parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars.')
 
-def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
+def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False, show_provisional=True, noprogress=None):
     args = parser.parse_args()
+    if noprogress is None:
+        noprogress = args.noprogress
+
     if question is None and args.q is not None:
         question = args.q
         if "." in question:
@@ -52,7 +50,7 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass
         else:
             question = int(question)
 
-    if not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
+    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
         raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")
 
     if unmute is None:
@@ -60,57 +58,13 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass
     if passall is None:
         passall = args.passall
 
-    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not args.noprogress, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,
+
+    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,
+                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,
                                           show_tol_err=show_tol_err)
 
-    try:  # For registering stats.
-        import unitgrade_private
-        import irlc.lectures
-        import xlwings
-        from openpyxl import Workbook
-        import pandas as pd
-        from collections import defaultdict
-        dd = defaultdict(lambda: [])
-        error_computed = []
-        for k1, (q, _) in enumerate(report.questions):
-            for k2, item in enumerate(q.items):
-                dd['question_index'].append(k1)
-                dd['item_index'].append(k2)
-                dd['question'].append(q.name)
-                dd['item'].append(item.name)
-                dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol)
-                error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed)
-
-        qstats = report.wdir + "/" + report.name + ".xlsx"
-
-        if os.path.isfile(qstats):
-            d_read = pd.read_excel(qstats).to_dict()
-        else:
-            d_read = dict()
-
-        for k in range(1000):
-            key = 'run_'+str(k)
-            if key in d_read:
-                dd[key] = list(d_read['run_0'].values())
-            else:
-                dd[key] = error_computed
-                break
-
-        workbook = Workbook()
-        worksheet = workbook.active
-        for col, key in enumerate(dd.keys()):
-            worksheet.cell(row=1, column=col+1).value = key
-            for row, item in enumerate(dd[key]):
-                worksheet.cell(row=row+2, column=col+1).value = item
-
-        workbook.save(qstats)
-        workbook.close()
-
-    except ModuleNotFoundError as e:
-        s = 234
-        pass
-
-    if question is None:
+
+    if question is None and show_provisional:
         print("Provisional evaluation")
         print(tabulate(table_data))
         table = table_data
@@ -123,6 +77,8 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass
         print("Note your results have not yet been registered. \nTo register your results, please run the file:")
         print(">>>", gfile)
         print("In the same manner as you ran this file.")
+
     return results
 
 
@@ -132,121 +88,89 @@ def upack(q):
     h = np.asarray(h)
     return h[:,0], h[:,1], h[:,2],
 
-
+class SequentialTestLoader(unittest.TestLoader):
+    """ A TestLoader which returns the test methods in the order they are
+    defined on the class, rather than unittest's default alphabetical order. """
+    def getTestCaseNames(self, testCaseClass):
+        test_names = super().getTestCaseNames(testCaseClass)
+        # Collect attribute names along the mro so base-class tests come first.
+        ls = []
+        for C in testCaseClass.mro():
+            if issubclass(C, unittest.TestCase):
+                ls = list(C.__dict__.keys()) + ls
+        testcase_methods = ls
+        test_names.sort(key=testcase_methods.index)
+        return test_names
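# A minimal sketch (hypothetical class `Demo`, not part of the patch) of what
# the loader above changes: unittest's default loader sorts test names
# alphabetically, while SequentialTestLoader preserves definition order.
class Demo(unittest.TestCase):
    def test_b(self): pass
    def test_a(self): pass

print(unittest.TestLoader().getTestCaseNames(Demo))    # ['test_a', 'test_b']
print(SequentialTestLoader().getTestCaseNames(Demo))   # ['test_b', 'test_a']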
 
 def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
                     show_progress_bar=True,
-                    show_tol_err=False):
-    from src.snipper.version import __version__
+                    show_tol_err=False,
+                    big_header=True):
+
+    from unitgrade.version import __version__
     now = datetime.now()
-    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
-    b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
-    print(b + " v" + __version__)
+    if big_header:
+        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
+        b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    else:
+        b = "Unitgrade"
     dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
-    print("Started: " + dt_string)
+    print(b + " v" + __version__ + ", started: " + dt_string+ "\n")
+    # print("Started: " + dt_string)
     s = report.title
-    if report.version is not None:
+    if hasattr(report, "version") and report.version is not None:
         s += " version " + report.version
-    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")
-    print(f"Loaded answers from: ", report.computed_answers_file, "\n")
+    print(s, "(use --help for options)" if show_help_flag else "")
+    # print(f"Loaded answers from: ", report.computed_answers_file, "\n")
     table_data = []
-    nL = 80
     t_start = time.time()
     score = {}
+    loader = SequentialTestLoader()
+
     for n, (q, w) in enumerate(report.questions):
-        q_hidden = issubclass(q.__class__, Hidden)
         if question is not None and n+1 != question:
             continue
-        q_title_print = "Question %i: %s"%(n+1, q.title)
+        suite = loader.loadTestsFromTestCase(q)
+        qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__
+        q_title_print = "Question %i: %s"%(n+1, qtitle)
         print(q_title_print, end="")
         q.possible = 0
         q.obtained = 0
         q_ = {} # Gather score in this class.
+        from unitgrade.unitgrade import UTextTestRunner
+        UTextResult.q_title_print = q_title_print # Hacky
+        UTextResult.show_progress_bar = show_progress_bar # Hacky.
+        UTextResult.number = n
+        UTextResult.nL = report.nL
+
+        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
+
+        for s in res.successes:
+            q_[s._testMethodName] = ("pass",None)
+        for (s,msg) in res.failures:
+            q_[s._testMethodName] = ("fail", msg)
+        for (s,msg) in res.errors:
+            q_[s._testMethodName] = ("error", msg)
 
-        q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
-
-        for j, item in enumerate(q.items):
-            if qitem is not None and question is not None and j+1 != qitem:
-                continue
-
-            if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.
-                # if not item.question.has_called_init_:
-                start = time.time()
-
-                cc = None
-                if show_progress_bar:
-                    total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )
-                    cc = ActiveProgress(t=total_estimated_time, title=q_title_print)
-                with eval('Capturing')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.
-                    try:
-                        for q2 in q_with_outstanding_init:
-                            q2.init()
-                            q2.has_called_init_ = True
-
-                        # item.question.init()  # Initialize the question. Useful for sharing resources.
-                    except Exception as e:
-                        if not passall:
-                            if not silent:
-                                print(" ")
-                                print("="*30)
-                                print(f"When initializing question {q.title} the initialization code threw an error")
-                                print(e)
-                                print("The remaining parts of this question will likely fail.")
-                                print("="*30)
-
-                if show_progress_bar:
-                    cc.terminate()
-                    sys.stdout.flush()
-                    print(q_title_print, end="")
-
-                # item.question.has_called_init_ = True
-                q_time =np.round(  time.time()-start, 2)
-
-                print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")
-                print("=" * nL)
-                q_with_outstanding_init = None
-
-            # item.question = q # Set the parent question instance for later reference.
-            item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)
-
-            if show_progress_bar:
-                cc = ActiveProgress(t=item.estimated_time, title=item_title_print)
-            else:
-                print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="")
-            hidden = issubclass(item.__class__, Hidden)
-            # if not hidden:
-            #     print(ss, end="")
-            # sys.stdout.flush()
-            start = time.time()
-
-            (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)
-            q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title}
-            tsecs = np.round(time.time()-start, 2)
-            if show_progress_bar:
-                cc.terminate()
-                sys.stdout.flush()
-                print(item_title_print + ('.' * max(0, nL - 4 - len(ss))), end="")
-
-            if not hidden:
-                ss = "PASS" if current == possible else "*** FAILED"
-                if tsecs >= 0.1:
-                    ss += " ("+ str(tsecs) + " seconds)"
-                print(ss)
-
-        ws, possible, obtained = upack(q_)
-        possible = int(ws @ possible)
-        obtained = int(ws @ obtained)
-        obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0
-        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'hidden': q_hidden, 'title': q.title}
 
+        possible = res.testsRun
+        obtained = len(res.successes)
+
+        assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun
+
+        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
+        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle, 'name': q.__name__}
         q.obtained = obtained
         q.possible = possible
-
-        s1 = f"*** Question q{n+1}"
+        s1 = f" * q{n+1})   Total"
         s2 = f" {q.obtained}/{w}"
-        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )
+        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )
         print(" ")
-        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])
+        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])
 
     ws, possible, obtained = upack(score)
     possible = int( msum(possible) )
@@ -261,7 +185,11 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
     seconds = dt - minutes*60
     plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")
 
-    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")
+    from unitgrade.unitgrade import dprint
+    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",
+           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)
+
+    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")
 
     table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
     results = {'total': (obtained, possible), 'details': score}
diff --git a/src/unitgrade/version.py b/src/unitgrade/version.py
index a68927d..48fef32 100644
--- a/src/unitgrade/version.py
+++ b/src/unitgrade/version.py
@@ -1 +1 @@
-__version__ = "0.1.0"
\ No newline at end of file
+__version__ = "0.1.2"
\ No newline at end of file
diff --git a/src/unitgrade2/__init__.py b/src/unitgrade2/__init__.py
deleted file mode 100644
index caeb0a3..0000000
--- a/src/unitgrade2/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import unitgrade2.version as __version__
-from unitgrade2.unitgrade2 import myround, mfloor, msum, ActiveProgress
-from unitgrade2.unitgrade2 import Capturing, Report, UTestCase, cache, hide
-
-from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-
-# import os
-# import lzma
-# import pickle
-
-# DONT't import stuff here since install script requires __version__
-
-# def cache_write(object, file_name, verbose=True):
-#     # raise Exception("bad")
-#     # import compress_pickle
-#     dn = os.path.dirname(file_name)
-#     if not os.path.exists(dn):
-#         os.mkdir(dn)
-#     if verbose: print("Writing cache...", file_name)
-#     with lzma.open(file_name, 'wb', ) as f:
-#         pickle.dump(object, f)
-#     if verbose: print("Done!")
-#
-#
-# def cache_exists(file_name):
-#     # file_name = cn_(file_name) if cache_prefix else file_name
-#     return os.path.exists(file_name)
-#
-#
-# def cache_read(file_name):
-#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.
-#     # file_name = cn_(file_name) if cache_prefix else file_name
-#     if os.path.exists(file_name):
-#         try:
-#             with lzma.open(file_name, 'rb') as f:
-#                 return pickle.load(f)
-#         except Exception as e:
-#             print("Tried to load a bad pickle file at", file_name)
-#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")
-#             print(e)
-#             # return pickle.load(f)
-#     else:
-#         return None
-
diff --git a/src/unitgrade2/__pycache__/__init__.cpython-38.pyc b/src/unitgrade2/__pycache__/__init__.cpython-38.pyc
deleted file mode 100644
index d56ac9f30a848efb81409d9ed034b964f0dabe35..0000000000000000000000000000000000000000
Binary files a/src/unitgrade2/__pycache__/__init__.cpython-38.pyc and /dev/null differ

diff --git a/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc b/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc
deleted file mode 100644
index 59bb92f4d418f4c6e20156d57457f67fac1d2d28..0000000000000000000000000000000000000000
Binary files a/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc and /dev/null differ

diff --git a/src/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc b/src/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc
deleted file mode 100644
index ebae664e67677e245b6a227b53dd7158fc9deb68..0000000000000000000000000000000000000000
Binary files a/src/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc and /dev/null differ

diff --git a/src/unitgrade2/__pycache__/version.cpython-38.pyc b/src/unitgrade2/__pycache__/version.cpython-38.pyc
deleted file mode 100644
index ad6a8f2a32ed6b8a848b77969560d841ab9cb8d0..0000000000000000000000000000000000000000
Binary files a/src/unitgrade2/__pycache__/version.cpython-38.pyc and /dev/null differ

diff --git a/src/unitgrade2/unitgrade2.py b/src/unitgrade2/unitgrade2.py
deleted file mode 100644
index 087343c..0000000
--- a/src/unitgrade2/unitgrade2.py
+++ /dev/null
@@ -1,686 +0,0 @@
-import numpy as np
-import sys
-import re
-import threading
-import tqdm
-import pickle
-import os
-from io import StringIO
-import io
-from unittest.runner import _WritelnDecorator
-from typing import Any
-import inspect
-import colorama
-from colorama import Fore
-from functools import _make_key, RLock
-from collections import namedtuple
-import unittest
-import time
-import textwrap
-
-_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
-
-colorama.init(autoreset=True)  # auto resets your settings after every output
-
-def gprint(s):
-    print(f"{Fore.GREEN}{s}")
-
-myround = lambda x: np.round(x)  # required.
-msum = lambda x: sum(x)
-mfloor = lambda x: np.floor(x)
-
-
-def setup_dir_by_class(C, base_dir):
-    name = C.__class__.__name__
-    return base_dir, name
-
-
-class Logger(object):
-    def __init__(self, buffer):
-        assert False
-        self.terminal = sys.stdout
-        self.log = buffer
-
-    def write(self, message):
-        self.terminal.write(message)
-        self.log.write(message)
-
-    def flush(self):
-        # this flush method is needed for python 3 compatibility.
-        pass
-
-
-class Capturing(list):
-    def __init__(self, *args, stdout=None, unmute=False, **kwargs):
-        self._stdout = stdout
-        self.unmute = unmute
-        super().__init__(*args, **kwargs)
-
-    def __enter__(self, capture_errors=True):  # don't put arguments here.
-        self._stdout = sys.stdout if self._stdout == None else self._stdout
-        self._stringio = StringIO()
-        if self.unmute:
-            sys.stdout = Logger(self._stringio)
-        else:
-            sys.stdout = self._stringio
-
-        if capture_errors:
-            self._sterr = sys.stderr
-            sys.sterr = StringIO()  # memory hole it
-        self.capture_errors = capture_errors
-        return self
-
-    def __exit__(self, *args):
-        self.extend(self._stringio.getvalue().splitlines())
-        del self._stringio  # free up some memory
-        sys.stdout = self._stdout
-        if self.capture_errors:
-            sys.sterr = self._sterr
-
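# A minimal usage sketch of the Capturing helper above: while the block runs,
# stdout is redirected to a StringIO and the printed lines are collected on
# the list object itself.
with Capturing() as out:
    print("hello")
    print("world")
assert out == ["hello", "world"]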
-
-class Capturing2(Capturing):
-    def __exit__(self, *args):
-        lines = self._stringio.getvalue().splitlines()
-        txt = "\n".join(lines)
-        numbers = extract_numbers(txt)
-        self.extend(lines)
-        del self._stringio  # free up some memory
-        sys.stdout = self._stdout
-        if self.capture_errors:
-            sys.sterr = self._sterr
-
-        self.output = txt
-        self.numbers = numbers
-
-
-class Report:
-    title = "report title"
-    version = None
-    questions = []
-    pack_imports = []
-    individual_imports = []
-    nL = 120  # Maximum line width
-
-    @classmethod
-    def reset(cls):
-        for (q, _) in cls.questions:
-            if hasattr(q, 'reset'):
-                q.reset()
-
-    @classmethod
-    def mfile(clc):
-        return inspect.getfile(clc)
-
-    def _file(self):
-        return inspect.getfile(type(self))
-
-    def _import_base_relative(self):
-        if hasattr(self.pack_imports[0], '__path__'):
-            root_dir = self.pack_imports[0].__path__._path[0]
-        else:
-            root_dir = self.pack_imports[0].__file__
-
-        root_dir = os.path.dirname(root_dir)
-        relative_path = os.path.relpath(self._file(), root_dir)
-        modules = os.path.normpath(relative_path[:-3]).split(os.sep)
-        return root_dir, relative_path, modules
-
-    def __init__(self, strict=False, payload=None):
-        working_directory = os.path.abspath(os.path.dirname(self._file()))
-        self.wdir, self.name = setup_dir_by_class(self, working_directory)
-        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")
-        for (q, _) in self.questions:
-            q.nL = self.nL  # Set maximum line length.
-
-        if payload is not None:
-            self.set_payload(payload, strict=strict)
-
-    def main(self, verbosity=1):
-        # Run all tests using standard unittest (nothing fancy).
-        loader = unittest.TestLoader()
-        for q, _ in self.questions:
-            start = time.time()  # A good proxy for setup time is to
-            suite = loader.loadTestsFromTestCase(q)
-            unittest.TextTestRunner(verbosity=verbosity).run(suite)
-            total = time.time() - start
-            q.time = total
-
-    def _setup_answers(self, with_coverage=False):
-        if with_coverage:
-            for q, _ in self.questions:
-                q._with_coverage = True
-                q._report = self
-
-        self.main()  # Run all tests in class just to get that out of the way...
-        report_cache = {}
-        for q, _ in self.questions:
-            # print(self.questions)
-            if hasattr(q, '_save_cache'):
-                q()._save_cache()
-                print("q is", q())
-                q()._cache_put('time', q.time) # = q.time
-                report_cache[q.__qualname__] = q._cache2
-            else:
-                report_cache[q.__qualname__] = {'no cache see _setup_answers in unitgrade2.py': True}
-        if with_coverage:
-            for q, _ in self.questions:
-                q._with_coverage = False
-        return report_cache
-
-    def set_payload(self, payloads, strict=False):
-        for q, _ in self.questions:
-            q._cache = payloads[q.__qualname__]
-
-
-def rm_progress_bar(txt):
-    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.
-    nlines = []
-    for l in txt.splitlines():
-        pct = l.find("%")
-        ql = False
-        if pct > 0:
-            i = l.find("|", pct + 1)
-            if i > 0 and l.find("|", i + 1) > 0:
-                ql = True
-        if not ql:
-            nlines.append(l)
-    return "\n".join(nlines)
-
-
-def extract_numbers(txt):
-    # txt = rm_progress_bar(txt)
-    numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
-    rx = re.compile(numeric_const_pattern, re.VERBOSE)
-    all = rx.findall(txt)
-    all = [float(a) if ('.' in a or "e" in a) else int(a) for a in all]
-    if len(all) > 500:
-        print(txt)
-        raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))
-    return all
-
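# A minimal sketch of extract_numbers above: it pulls every int/float literal
# out of a blob of text, preserving the type of each match.
assert extract_numbers("err=0.5 after 3 steps, lr 1e-2") == [0.5, 3, 0.01]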
-
-class ActiveProgress():
-    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):
-        if file == None:
-            file = sys.stdout
-        self.file = file
-        self.t = t
-        self._running = False
-        self.title = title
-        self.dt = 0.01
-        self.n = int(np.round(self.t / self.dt))
-        self.show_progress_bar = show_progress_bar
-        self.pbar = None
-
-        if start:
-            self.start()
-
-    def start(self):
-        self._running = True
-        if self.show_progress_bar:
-            self.thread = threading.Thread(target=self.run)
-            self.thread.start()
-        self.time_started = time.time()
-
-    def terminate(self):
-        if not self._running:
-            raise Exception("Stopping a stopped progress bar. ")
-        self._running = False
-        if self.show_progress_bar:
-            self.thread.join()
-        if self.pbar is not None:
-            self.pbar.update(1)
-            self.pbar.close()
-            self.pbar = None
-
-        self.file.flush()
-        return time.time() - self.time_started
-
-    def run(self):
-        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,
-                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')
-
-        for _ in range(self.n - 1):  # Don't terminate completely; leave bar at 99% done until terminate.
-            if not self._running:
-                self.pbar.close()
-                self.pbar = None
-                break
-
-            time.sleep(self.dt)
-            self.pbar.update(1)
-
-def dprint(first, last, nL, extra = "", file=None, dotsym='.', color='white'):
-    if file == None:
-        file = sys.stdout
-    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))
-    print(first + dot_parts, end="", file=file)
-    last += extra
-    print(last, file=file)
-
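# A minimal sketch of the dotted alignment dprint above produces:
dprint(first=" * q1)   Total", last=" 10/10", nL=40)
# prints: " * q1)   Total.................... 10/10"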
-
-class UTextResult(unittest.TextTestResult):
-    nL = 80
-    number = -1  # HAcky way to set question number.
-    show_progress_bar = True
-    cc = None
-
-    def __init__(self, stream, descriptions, verbosity):
-        super().__init__(stream, descriptions, verbosity)
-        self.successes = []
-
-    def printErrors(self) -> None:
-        self.printErrorList('ERROR', self.errors)
-        self.printErrorList('FAIL', self.failures)
-
-    def addError(self, test, err):
-        super(unittest.TextTestResult, self).addFailure(test, err)
-        self.cc_terminate(success=False)
-
-    def addFailure(self, test, err):
-        super(unittest.TextTestResult, self).addFailure(test, err)
-        self.cc_terminate(success=False)
-
-    def addSuccess(self, test: unittest.case.TestCase) -> None:
-        self.successes.append(test)
-        self.cc_terminate()
-
-    def cc_terminate(self, success=True):
-        if self.show_progress_bar or True:
-            tsecs = np.round(self.cc.terminate(), 2)
-            self.cc.file.flush()
-            ss = self.item_title_print
-
-            state = "PASS" if success else "FAILED"
-
-            dot_parts = ('.' * max(0, self.nL - len(state) - len(ss)))
-            if self.show_progress_bar or True:
-                print(self.item_title_print + dot_parts, end="", file=self.cc.file)
-            else:
-                print(dot_parts, end="", file=self.cc.file)
-
-            if tsecs >= 0.5:
-                state += " (" + str(tsecs) + " seconds)"
-            print(state, file=self.cc.file)
-
-    def startTest(self, test):
-        # j =self.testsRun
-        self.testsRun += 1
-        # item_title = self.getDescription(test)
-        item_title = test.shortDescription()  # Better for printing (get from cache).
-        if item_title == None:
-            # For unittest framework where getDescription may return None.
-            item_title = self.getDescription(test)
-        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)
-        estimated_time = 10
-        if self.show_progress_bar or True:
-            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)
-        else:
-            print(self.item_title_print + ('.' * max(0, self.nL - 4 - len(self.item_title_print))), end="")
-
-        self._test = test
-        self._stdout = sys.stdout
-        sys.stdout = io.StringIO()
-
-    def stopTest(self, test):
-        sys.stdout = self._stdout
-        super().stopTest(test)
-
-    def _setupStdout(self):
-        if self._previousTestClass == None:
-            total_estimated_time = 1
-            if hasattr(self.__class__, 'q_title_print'):
-                q_title_print = self.__class__.q_title_print
-            else:
-                q_title_print = "<unnamed test. See unitgrade.py>"
-
-            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)
-            self.cc = cc
-
-    def _restoreStdout(self):  # Used when setting up the test.
-        if self._previousTestClass is None:
-            q_time = self.cc.terminate()
-            q_time = np.round(q_time, 2)
-            sys.stdout.flush()
-            if self.show_progress_bar:
-                print(self.cc.title, end="")
-            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))
-
-
-class UTextTestRunner(unittest.TextTestRunner):
-    def __init__(self, *args, **kwargs):
-        stream = io.StringIO()
-        super().__init__(*args, stream=stream, **kwargs)
-
-    def _makeResult(self):
-        # stream = self.stream # not you!
-        stream = sys.stdout
-        stream = _WritelnDecorator(stream)
-        return self.resultclass(stream, self.descriptions, self.verbosity)
-
-
-def cache(foo, typed=False):
-    """ Magic cache wrapper
-    https://github.com/python/cpython/blob/main/Lib/functools.py
-    """
-    maxsize = None
-    def wrapper(self, *args, **kwargs):
-        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))
-        if not self._cache_contains(key):
-            value = foo(self, *args, **kwargs)
-            self._cache_put(key, value)
-        else:
-            value = self._cache_get(key)
-        return value
-
-    return wrapper
-
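# A minimal sketch of the @cache wrapper above on a UTestCase subclass
# (`CachedDemo` and `slow_answer` are hypothetical names; the _cache helpers
# the wrapper relies on are defined on UTestCase later in this module). The
# first run computes the value and stores it under the test's cache id;
# later runs replay the stored value.
class CachedDemo(UTestCase):
    @cache
    def slow_answer(self, n):
        return sum(range(n))

    def test_answer(self):
        self.assertEqual(self.slow_answer(10), 45)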
-
-def get_hints(ss):
-    if ss == None:
-        return None
-    try:
-        ss = textwrap.dedent(ss)
-        ss = ss.replace('''"""''', "").strip()
-        hints = ["hints:", "hint:"]
-        j = np.argmax([ss.lower().find(h) for h in hints])
-        h = hints[j]
-        ss = ss[ss.lower().find(h) + len(h) + 1:]
-        ss = "\n".join([l for l in ss.split("\n") if not l.strip().startswith(":")])
-        ss = textwrap.dedent(ss).strip()
-        return ss
-    except Exception as e:
-        print("bad hints", ss, e)
-
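# A minimal sketch of get_hints above applied to a docstring (the docstring
# content is hypothetical): everything after the "Hints:" marker is returned,
# dedented and stripped.
doc = """Check the list-reversal question.

Hints:
    Remember that lst[::-1] reverses a list.
"""
print(get_hints(doc))  # -> Remember that lst[::-1] reverses a list.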
-
-class UTestCase(unittest.TestCase):
-    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.
-    _cache = None  # Read-only cache. Ensures method always produce same result.
-    _cache2 = None  # User-written cache.
-    _with_coverage = False
-    _report = None  # The report used. This is very, very hacky and should always be None. Don't rely on it!
-
-    def capture(self):
-        if hasattr(self, '_stdout') and self._stdout is not None:
-            file = self._stdout
-        else:
-            # self._stdout = sys.stdout
-            # sys._stdout = io.StringIO()
-            file = sys.stdout
-        return Capturing2(stdout=file)
-
-    @classmethod
-    def question_title(cls):
-        """ Return the question title """
-        return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__
-
-    @classmethod
-    def reset(cls):
-        print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")
-        cls._outcome = None
-        cls._cache = None
-        cls._cache2 = None
-
-    def _callSetUp(self):
-        if self._with_coverage:
-            if not hasattr(self._report, 'covcache'):
-                self._report.covcache = {}
-            import coverage
-            self.cov = coverage.Coverage()
-            self.cov.start()
-        self.setUp()
-
-    def _callTearDown(self):
-        self.tearDown()
-        if self._with_coverage:
-            from pathlib import Path
-            from snipper import snipper_main
-            self.cov.stop()
-            data = self.cov.get_data()
-            base, _, _ = self._report._import_base_relative()
-            for file in data.measured_files():
-                file = os.path.normpath(file)
-                root = Path(base)
-                child = Path(file)
-                if root in child.parents:
-                    with open(child, 'r') as f:
-                        s = f.read()
-                    lines = s.splitlines()
-                    garb = 'GARBAGE'
-                    lines2 = snipper_main.censor_code(lines, keep=True)
-                    assert len(lines) == len(lines2)
-                    for l in data.contexts_by_lineno(file):
-                        if lines2[l].strip() == garb:
-                            if self.cache_id() not in self._report.covcache:
-                                self._report.covcache[self.cache_id()] = {}
-
-                            rel = os.path.relpath(child, root)
-                            cc = self._report.covcache[self.cache_id()]
-                            j = 0
-                            for j in range(l, -1, -1):
-                                if "def" in lines2[j] or "class" in lines2[j]:
-                                    break
-                            from snipper.legacy import gcoms
-                            fun = lines2[j]
-                            comments, _ = gcoms("\n".join(lines2[j:l]))
-                            if rel not in cc:
-                                cc[rel] = {}
-                            cc[rel][fun] = (l, "\n".join(comments))
-                            self._cache_put((self.cache_id(), 'coverage'), self._report.covcache)
-
-    def shortDescriptionStandard(self):
-        sd = super().shortDescription()
-        if sd is None:
-            sd = self._testMethodName
-        return sd
-
-    def shortDescription(self):
-        sd = self.shortDescriptionStandard()
-        title = self._cache_get((self.cache_id(), 'title'), sd)
-        return title if title is not None else sd
-
-    @property
-    def title(self):
-        return self.shortDescription()
-
-    @title.setter
-    def title(self, value):
-        self._cache_put((self.cache_id(), 'title'), value)
-
-    def _get_outcome(self):
-        if not hasattr(self.__class__, '_outcome') or self.__class__._outcome is None:
-            self.__class__._outcome = {}
-        return self.__class__._outcome
-
-    def _callTestMethod(self, testMethod):
-        t = time.time()
-        self._ensure_cache_exists()  # Make sure cache is there.
-        if self._testMethodDoc is not None:
-            self._cache_put((self.cache_id(), 'title'), self.shortDescriptionStandard())
-
-        self._cache2[(self.cache_id(), 'assert')] = {}
-        res = testMethod()
-        elapsed = time.time() - t
-        self._get_outcome()[self.cache_id()] = res
-        self._cache_put((self.cache_id(), "time"), elapsed)
-
-    def cache_id(self):
-        c = self.__class__.__qualname__
-        m = self._testMethodName
-        return c, m
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self._load_cache()
-        self._assert_cache_index = 0
-
-    def _ensure_cache_exists(self):
-        if not hasattr(self.__class__, '_cache') or self.__class__._cache is None:
-            self.__class__._cache = dict()
-        if not hasattr(self.__class__, '_cache2') or self.__class__._cache2 is None:
-            self.__class__._cache2 = dict()
-
-    def _cache_get(self, key, default=None):
-        self._ensure_cache_exists()
-        return self.__class__._cache.get(key, default)
-
-    def _cache_put(self, key, value):
-        self._ensure_cache_exists()
-        self.__class__._cache2[key] = value
-
-    def _cache_contains(self, key):
-        self._ensure_cache_exists()
-        return key in self.__class__._cache
-
-    def wrap_assert(self, assert_fun, first, *args, **kwargs):
-        # sys.stdout = self._stdout
-        key = (self.cache_id(), 'assert')
-        if not self._cache_contains(key):
-            print("Warning, framework missing", key)
-            self.__class__._cache[key] = {}  # Insert a fresh dict here; the code below relies on it being mutable.
-        cache = self._cache_get(key)
-        id = self._assert_cache_index
-        if id not in cache:
-            print("Warning, framework missing cache index", key, "id =", id)
-        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")
-
-        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.
-        cache[id] = first
-        self._cache_put(key, cache)
-        self._assert_cache_index += 1
-        assert_fun(first, _expected, *args, **kwargs)
-
-    def assertEqualC(self, first: Any, msg: Any = ...) -> None:
-        self.wrap_assert(self.assertEqual, first, msg)
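-
-    # Usage sketch (illustrative): calling self.assertEqualC(value) in a test
-    # compares `value` against the result a deploy run stored in the read-only
-    # cache; either way, wrap_assert records the fresh value in _cache2.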
-
-    def _cache_file(self):
-        # The filename-directory stuff is a bit tricky but this seems robust.
-        return os.path.join(os.path.dirname(inspect.getabsfile(type(self))), "unitgrade", self.__class__.__name__ + ".pkl")
-
-    def _save_cache(self):
-        # get the class name (i.e. what to save to).
-        cfile = self._cache_file()
-        if not os.path.isdir(os.path.dirname(cfile)):
-            os.makedirs(os.path.dirname(cfile))
-
-        if hasattr(self.__class__, '_cache2'):
-            with open(cfile, 'wb') as f:
-                pickle.dump(self.__class__._cache2, f)
-
-    # But you can also set cache explicitly.
-    def _load_cache(self):
-        if self._cache is not None:  # Cache already loaded. We will not load it twice.
-            return
-            # raise Exception("Loaded cache which was already set. What is going on?!")
-        cfile = self._cache_file()
-        if os.path.exists(cfile):
-            try:
-                # print("\ncache file", cfile)
-                with open(cfile, 'rb') as f:
-                    data = pickle.load(f)
-                self.__class__._cache = data
-            except Exception as e:
-                print("Bad cache", cfile)
-                print(e)
-        else:
-            print("Warning! data file not found", cfile)
-
-    def _feedErrorsToResult(self, result, errors):
-        """ Use this to show hints on test failure. """
-        if not isinstance(result, UTextResult):
-            er = [e for e, v in errors if v is not None]
-
-            if len(er) > 0:
-                hints = []
-                key = (self.cache_id(), 'coverage')
-                if self._cache_contains(key):
-                    CC = self._cache_get(key)
-                    for id in CC:
-                        if id == self.cache_id():
-                            cl, m = id
-                            gprint(f"> An error occured while solving: {cl}.{m}. The files/methods you need to edit are:")  # For the test {id} in {file} you should edit:")
-                            for file in CC[id]:
-                                rec = CC[id][file]
-                                gprint(f">   * {file}")
-                                for l in rec:
-                                    _, comments = CC[id][file][l]
-                                    hint = get_hints(comments)
-
-                                    if hint is not None:
-                                        # hint = textwrap.dedent(hint)
-                                        hints.append((hint, file, l) )
-                                    gprint(f">      - {l}")
-
-                er = er[0]
-                doc = er._testMethodDoc
-                if doc is not None:
-                    hint = get_hints(er._testMethodDoc)
-                    if hint is not None:
-                        hints = [(hint, None, self.cache_id()[1] )] + hints
-                if len(hints) > 0:
-                    for hint, file, method in hints:
-                        s = (f"'{method.strip()}'" if method is not None else "")
-                        if method is not None and file is not None:
-                            s += " in "
-                        s += (file.strip() if file is not None else "")
-                        gprint(">")
-                        gprint("> Hints (from " + s  + ")")
-                        gprint(textwrap.indent(hint, ">   "))
-
-        super()._feedErrorsToResult(result, errors)
-
-
-def hide(func):
-    return func
-
-
-def makeRegisteringDecorator(foreignDecorator):
-    """
-        Returns a copy of foreignDecorator, which is identical in every
-        way(*), except also appends a .decorator property to the callable it
-        spits out.
-    """
-
-    def newDecorator(func):
-        # Call to newDecorator(method)
-        # Exactly like old decorator, but output keeps track of what decorated it
-        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done
-        R.decorator = newDecorator  # keep track of decorator
-        # R.original = func         # might as well keep track of everything!
-        return R
-
-    newDecorator.__name__ = foreignDecorator.__name__
-    newDecorator.__doc__ = foreignDecorator.__doc__
-    return newDecorator
-
-hide = makeRegisteringDecorator(hide)
-
-def methodsWithDecorator(cls, decorator):
-    """
-        Returns all methods in CLS with DECORATOR as the
-        outermost decorator.
-
-        DECORATOR must be a "registering decorator"; one
-        can make any decorator "registering" via the
-        makeRegisteringDecorator function.
-
-        import inspect
-        ls = list(methodsWithDecorator(GeneratorQuestion, deco))
-        for f in ls:
-            print(inspect.getsourcelines(f) ) # How to get all hidden questions.
-    """
-    for maybeDecorated in cls.__dict__.values():
-        if hasattr(maybeDecorated, 'decorator'):
-            if maybeDecorated.decorator == decorator:
-                yield maybeDecorated
-# 817, 705
\ No newline at end of file
diff --git a/src/unitgrade2/unitgrade_helpers2.py b/src/unitgrade2/unitgrade_helpers2.py
deleted file mode 100644
index 7f538f0..0000000
--- a/src/unitgrade2/unitgrade_helpers2.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import numpy as np
-from tabulate import tabulate
-from datetime import datetime
-import pyfiglet
-from unitgrade2 import msum
-import unittest
-from unitgrade2.unitgrade2 import UTextResult
-import inspect
-import os
-import argparse
-import time
-
-parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: 
-To run all tests in a report: 
-
-> python assignment1_dp.py
-
-To run only question 2 or question 2.1
-
-> python assignment1_dp.py -q 2
-> python assignment1_dp.py -q 2.1
-
-Note that this script does not grade your report. To grade your report, use:
-
-> python report1_grade.py
-
-Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
-For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to `Documents/` and run:
-
-> python -m course_package.report1
-
-see https://docs.python.org/3.9/using/cmdline.html
-""", formatter_class=argparse.RawTextHelpFormatter)
-parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)')
-parser.add_argument('--showexpected',  action="store_true",  help='Show the expected/desired result')
-parser.add_argument('--showcomputed',  action="store_true",  help='Show the answer your code computes')
-parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
-parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
-parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars.')
-
-def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False, show_provisional=True, noprogress=None):
-    args = parser.parse_args()
-    if noprogress is None:
-        noprogress = args.noprogress
-
-    if question is None and args.q is not None:
-        question = args.q
-        if "." in question:
-            question, qitem = [int(v) for v in question.split(".")]
-        else:
-            question = int(question)
-
-    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
-        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")
-
-    if unmute is None:
-        unmute = args.unmute
-    if passall is None:
-        passall = args.passall
-
-
-    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,
-                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,
-                                          show_tol_err=show_tol_err)
-
-
-    if question is None and show_provisional:
-        print("Provisional evaluation")
-        print(tabulate(table_data))
-        print(" ")
-
-    fr = inspect.getouterframes(inspect.currentframe())[1].filename
-    gfile = os.path.basename(fr)[:-3] + "_grade.py"
-    if os.path.exists(gfile):
-        print("Note your results have not yet been registered. \nTo register your results, please run the file:")
-        print(">>>", gfile)
-        print("In the same manner as you ran this file.")
-
-
-    return results
-
-
-def upack(q):
-    h = [(i['w'], i['possible'], i['obtained']) for i in q.values()]
-    h = np.asarray(h)
-    return h[:, 0], h[:, 1], h[:, 2]
-
-# class UnitgradeTextRunner(unittest.TextTestRunner):
-#     def __init__(self, *args, **kwargs):
-#         super().__init__(*args, **kwargs)
-
-class SequentialTestLoader(unittest.TestLoader):
-    def getTestCaseNames(self, testCaseClass):
-        test_names = super().getTestCaseNames(testCaseClass)
-        # testcase_methods = list(testCaseClass.__dict__.keys())
-        ls = []
-        for C in testCaseClass.mro():
-            if issubclass(C, unittest.TestCase):
-                ls = list(C.__dict__.keys()) + ls
-        testcase_methods = ls
-        test_names.sort(key=testcase_methods.index)
-        return test_names
-
-def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
-                    show_progress_bar=True,
-                    show_tol_err=False,
-                    big_header=True):
-
-    from unitgrade2.version import __version__
-    now = datetime.now()
-    if big_header:
-        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
-        b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
-    else:
-        b = "Unitgrade"
-    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
-    print(b + " v" + __version__ + ", started: " + dt_string+ "\n")
-    # print("Started: " + dt_string)
-    s = report.title
-    if hasattr(report, "version") and report.version is not None:
-        s += " version " + report.version
-    print(s, "(use --help for options)" if show_help_flag else "")
-    # print(f"Loaded answers from: ", report.computed_answers_file, "\n")
-    table_data = []
-    t_start = time.time()
-    score = {}
-    loader = SequentialTestLoader()
-
-    for n, (q, w) in enumerate(report.questions):
-        if question is not None and n+1 != question:
-            continue
-        suite = loader.loadTestsFromTestCase(q)
-        qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__
-        q_title_print = "Question %i: %s"%(n+1, qtitle)
-        print(q_title_print, end="")
-        q.possible = 0
-        q.obtained = 0
-        q_ = {} # Gather score in this class.
-        from unitgrade2.unitgrade2 import UTextTestRunner
-        UTextResult.q_title_print = q_title_print # Hacky
-        UTextResult.show_progress_bar = show_progress_bar # Hacky.
-        UTextResult.number = n
-        UTextResult.nL = report.nL
-
-        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
-
-        for s in res.successes:
-            q_[s._testMethodName] = ("pass",None)
-        for (s,msg) in res.failures:
-            q_[s._testMethodName] = ("fail", msg)
-        for (s,msg) in res.errors:
-            q_[s._testMethodName] = ("error", msg)
-
-
-        possible = res.testsRun
-        obtained = len(res.successes)
-
-        assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun
-
-        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
-        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle, 'name': q.__name__}
-        q.obtained = obtained
-        q.possible = possible
-
-        s1 = f" * q{n+1})   Total"
-        s2 = f" {q.obtained}/{w}"
-        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )
-        print(" ")
-        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])
-
-    ws, possible, obtained = upack(score)
-    possible = int( msum(possible) )
-    obtained = int( msum(obtained) ) # Cast to python int
-    report.possible = possible
-    report.obtained = obtained
-    now = datetime.now()
-    dt_string = now.strftime("%H:%M:%S")
-
-    dt = int(time.time()-t_start)
-    minutes = dt//60
-    seconds = dt - minutes*60
-    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")
-
-    from unitgrade2.unitgrade2 import dprint
-    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",
-           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)
-
-    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")
-
-    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
-    results = {'total': (obtained, possible), 'details': score}
-    return results, table_data
diff --git a/src/unitgrade2/version.py b/src/unitgrade2/version.py
deleted file mode 100644
index d1f2e39..0000000
--- a/src/unitgrade2/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
\ No newline at end of file
diff --git a/src/unitgrade_v1/__init__.py b/src/unitgrade_v1/__init__.py
new file mode 100644
index 0000000..4024be0
--- /dev/null
+++ b/src/unitgrade_v1/__init__.py
@@ -0,0 +1,36 @@
+import os
+
+# Don't import stuff here: the install script must read __version__ without pulling in dependencies.
+
+def cache_write(object, file_name, verbose=True):
+    import compress_pickle
+    dn = os.path.dirname(file_name)
+    if not os.path.exists(dn):
+        os.makedirs(dn)
+    if verbose: print("Writing cache...", file_name)
+    with open(file_name, 'wb', ) as f:
+        compress_pickle.dump(object, f, compression="lzma")
+    if verbose: print("Done!")
+
+
+def cache_exists(file_name):
+    return os.path.exists(file_name)
+
+
+def cache_read(file_name):
+    import compress_pickle  # Import here: a top-level import would break reading __version__ during install.
+    if os.path.exists(file_name):
+        try:
+            with open(file_name, 'rb') as f:
+                return compress_pickle.load(f, compression="lzma")
+        except Exception as e:
+            print("Tried to load a bad pickle file at", file_name)
+            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")
+            print(e)
+            # return pickle.load(f)
+    else:
+        return None
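+
+# Illustrative round-trip of the helpers above (a sketch; the path is
+# hypothetical):
+#
+#   payload = {"Question1": {"item_0": {"payload": 42}}}
+#   cache_write(payload, "cache/answers.dat", verbose=False)
+#   assert cache_exists("cache/answers.dat")
+#   assert cache_read("cache/answers.dat") == payload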
+
+from unitgrade_v1.unitgrade import Hidden, myround, mfloor, msum, Capturing, ActiveProgress
diff --git a/src/unitgrade_v1/unitgrade.py b/src/unitgrade_v1/unitgrade.py
new file mode 100644
index 0000000..1ecc37b
--- /dev/null
+++ b/src/unitgrade_v1/unitgrade.py
@@ -0,0 +1,414 @@
+"""
+git add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade_v1.git --upgrade
+
+"""
+from . import cache_read
+import unittest
+import numpy as np
+import os
+import sys
+from io import StringIO
+import collections
+import inspect
+import re
+import threading
+import tqdm
+import time
+
+myround = lambda x: np.round(x)  # required.
+msum = lambda x: sum(x)
+mfloor = lambda x: np.floor(x)
+
+def setup_dir_by_class(C,base_dir):
+    name = C.__class__.__name__
+    # base_dir = os.path.join(base_dir, name)
+    # if not os.path.isdir(base_dir):
+    #     os.makedirs(base_dir)
+    return base_dir, name
+
+class Hidden:
+    def hide(self):
+        return True
+
+class Logger(object):
+    def __init__(self, buffer):
+        self.terminal = sys.stdout
+        self.log = buffer
+
+    def write(self, message):
+        self.terminal.write(message)
+        self.log.write(message)
+
+    def flush(self):
+        # this flush method is needed for python 3 compatibility.
+        pass
+
+class Capturing(list):
+    def __init__(self, *args, unmute=False, **kwargs):
+        self.unmute = unmute
+        super().__init__(*args, **kwargs)
+
+    def __enter__(self, capture_errors=True): # don't put arguments here.
+        self._stdout = sys.stdout
+        self._stringio = StringIO()
+        if self.unmute:
+            sys.stdout = Logger(self._stringio)
+        else:
+            sys.stdout = self._stringio
+
+        if capture_errors:
+            self._stderr = sys.stderr
+            sys.stderr = StringIO()  # Memory-hole stderr while capturing.
+        self.capture_errors = capture_errors
+        return self
+
+    def __exit__(self, *args):
+        self.extend(self._stringio.getvalue().splitlines())
+        del self._stringio    # free up some memory
+        sys.stdout = self._stdout
+        if self.capture_errors:
+            sys.stderr = self._stderr
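+
+# Minimal usage sketch (illustrative): collect printed lines while muting the
+# terminal; pass unmute=True to tee output through the Logger instead.
+#
+#   with Capturing(unmute=False) as out:
+#       print("hello")
+#   # out == ["hello"]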
+
+
+class QItem(unittest.TestCase):
+    title = None
+    testfun = None
+    tol = 0
+    estimated_time = 0.42
+    _precomputed_payload = None
+    _precomputed_title = None  # May be set from the payload; read by _safe_get_title.
+    _computed_answer = None # Internal helper to later get results.
+    weight = 1 # the weight of the question.
+
+    def __init__(self, question=None, *args, **kwargs):
+        if self.tol > 0 and self.testfun is None:
+            self.testfun = self.assertL2Relative
+        elif self.testfun is None:
+            self.testfun = self.assertEqual
+
+        self.name = self.__class__.__name__
+        # self._correct_answer_payload = correct_answer_payload
+        self.question = question
+
+        super().__init__(*args, **kwargs)
+        if self.title is None:
+            self.title = self.name
+
+    def _safe_get_title(self):
+        if self._precomputed_title is not None:
+            return self._precomputed_title
+        return self.title
+
+    def assertNorm(self, computed, expected, tol=None):
+        if tol is None:
+            tol = self.tol
+        diff = np.abs(np.asarray(computed).flatten() - np.asarray(expected).flatten())
+        nrm = np.sqrt(np.sum( diff ** 2))
+
+        self.error_computed = nrm
+
+        if nrm > tol:
+            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")
+            print(f"Element-wise differences {diff.tolist()}")
+            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
+
+    def assertL2(self, computed, expected, tol=None):
+        if tol is None:
+            tol = self.tol
+        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
+        self.error_computed = np.max(diff)
+
+        if np.max(diff) > tol:
+            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")
+            print(f"Element-wise differences {diff.tolist()}")
+            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")
+
+    def assertL2Relative(self, computed, expected, tol=None):
+        if tol is None:
+            tol = self.tol
+        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
+        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )
+        self.error_computed = np.max(np.abs(diff))
+        if np.sum(diff > tol) > 0:
+            print(f"Not equal within tolerance {tol}")
+            print(f"Element-wise differences {diff.tolist()}")
+            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
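+
+    # Worked example of the relative criterion above (illustrative): with
+    # computed=1.00 and expected=1.02, the element-wise value is
+    # |1.00 - 1.02| / (1e-8 + |1.00 + 1.02|) ~= 0.0099, so tol=0.01 passes
+    # while tol=0.005 fails.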
+
+    def precomputed_payload(self):
+        return self._precomputed_payload
+
+    def precompute_payload(self):
+        # Pre-compute resources to include in tests (useful for getting around rng).
+        pass
+
+    def compute_answer(self, unmute=False):
+        raise NotImplementedError("test code here")
+
+    def test(self, computed, expected):
+        self.testfun(computed, expected)
+
+    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):
+        possible = 1
+        computed = None
+        def show_computed_(computed):
+            print(">>> Your output:")
+            print(computed)
+
+        def show_expected_(expected):
+            print(">>> Expected output (note: may have been processed; read text script):")
+            print(expected)
+
+        correct = self._correct_answer_payload
+        try:
+            if unmute: # Required to not mix together print stuff.
+                print("")
+            computed = self.compute_answer(unmute=unmute)
+        except Exception as e:
+            if not passall:
+                if not silent:
+                    print("\n=================================================================================")
+                    print(f"When trying to run test class '{self.name}' your code threw an error:", e)
+                    show_expected_(correct)
+                    import traceback
+                    print(traceback.format_exc())
+                    print("=================================================================================")
+                return (0, possible)
+
+        if self._computed_answer is None:
+            self._computed_answer = computed
+
+        if show_expected or show_computed:
+            print("\n")
+        if show_expected:
+            show_expected_(correct)
+        if show_computed:
+            show_computed_(computed)
+        try:
+            if not passall:
+                self.test(computed=computed, expected=correct)
+        except Exception as e:
+            if not silent:
+                print("\n=================================================================================")
+                print(f"Test output from test class '{self.name}' does not match expected result. Test error:")
+                print(e)
+                show_computed_(computed)
+                show_expected_(correct)
+            return (0, possible)
+        return (1, possible)
+
+    def score(self):
+        try:
+            self.test()
+        except Exception as e:
+            return 0
+        return 1
+
+class QPrintItem(QItem):
+    def compute_answer_print(self):
+        """
+        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values
+        are send to process_output (see compute_answer below). In other words, the text generated is:
+
+        res = compute_Answer_print()
+        txt = (any terminal output generated above)
+        numbers = (any numbers found in terminal-output txt)
+
+        self.test(process_output(res, txt, numbers), <expected result>)
+
+        :return: Optional values for comparison
+        """
+        raise Exception("Generate output here. The output is passed to self.process_output")
+
+    def process_output(self, res, txt, numbers):
+        return res
+
+    def compute_answer(self, unmute=False):
+        with Capturing(unmute=unmute) as output:
+            res = self.compute_answer_print()
+        s = "\n".join(output)
+        s = rm_progress_bar(s) # Remove progress bar.
+        numbers = extract_numbers(s)
+        self._computed_answer = (res, s, numbers)
+        return self.process_output(res, s, numbers)
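+
+# Illustrative subclass (a sketch, not part of the framework): grade on the
+# last number printed by the tested code.
+#
+#   class SumQuestion(QPrintItem):
+#       def compute_answer_print(self):
+#           print("the sum is", sum([1, 2, 3]))
+#
+#       def process_output(self, res, txt, numbers):
+#           return numbers[-1]  # 6 is compared against the stored payload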
+
+class OrderedClassMembers(type):
+    @classmethod
+    def __prepare__(self, name, bases):
+        return collections.OrderedDict()
+    def __new__(self, name, bases, classdict):
+        ks = list(classdict.keys())
+        for b in bases:
+            ks += b.__ordered__
+        classdict['__ordered__'] = [key for key in ks if key not in ('__module__', '__qualname__')]
+        return type.__new__(self, name, bases, classdict)
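+
+# Effect of the metaclass (illustrative): members are recorded in definition
+# order, which QuestionGroup.items below relies on.
+#
+#   class Q(metaclass=OrderedClassMembers):
+#       def first(self): pass
+#       def second(self): pass
+#
+#   # Q.__ordered__ ends with ['first', 'second']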
+
+class QuestionGroup(metaclass=OrderedClassMembers):
+    title = "Untitled question"
+    partially_scored = False
+    t_init = 0  # Time spent on initialization (placeholder; set this externally).
+    estimated_time = 0.42
+    has_called_init_ = False
+    _name = None
+    _items = None
+
+    @property
+    def items(self):
+        if self._items is None:
+            self._items = []
+            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]
+            for I in members:
+                self._items.append( I(question=self))
+        return self._items
+
+    @items.setter
+    def items(self, value):
+        self._items = value
+
+    @property
+    def name(self):
+        if self._name is None:
+            self._name = self.__class__.__name__
+        return self._name
+
+    @name.setter
+    def name(self, val):
+        self._name = val
+
+    def init(self):
+        # Can be used to set resources relevant for this question instance.
+        pass
+
+    def init_all_item_questions(self):
+        for item in self.items:
+            if not item.question.has_called_init_:
+                item.question.init()
+                item.question.has_called_init_ = True
+
+
+class Report():
+    title = "report title"
+    version = None
+    questions = []
+    pack_imports = []
+    individual_imports = []
+
+    def __init__(self, strict=False, payload=None):
+        working_directory = os.path.abspath(os.path.dirname(inspect.getfile(type(self))))
+        self.wdir, self.name = setup_dir_by_class(self, working_directory)
+        self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")
+        import time
+        qs = [] # Has to accumulate to new array otherwise the setup/evaluation steps cannot be run in sequence.
+        for k, (Q, w) in enumerate(self.questions):
+            start = time.time()
+            q = Q()
+            q.t_init = time.time() - start
+            for j, item in enumerate(q.items):
+                item.name = item.name + "_" + str(j)
+            qs.append((q, w))
+
+        self.questions = qs
+        if payload is not None:
+            self.set_payload(payload, strict=strict)
+        else:
+            if os.path.isfile(self.computed_answers_file):
+                self.set_payload(cache_read(self.computed_answers_file), strict=strict)
+            else:
+                s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."
+                if strict:
+                    raise Exception(s)
+                else:
+                    print(s)
+
+
+    def set_payload(self, payloads, strict=False):
+        for q, _ in self.questions:
+            for item in q.items:
+                if q.name not in payloads or item.name not in payloads[q.name]:
+                    s = f"> Broken resource dictionary submitted to unitgrade_v1 for question {q.name} and subquestion {item.name}. Framework will not work."
+                    if strict:
+                        raise Exception(s)
+                    else:
+                        print(s)
+                else:
+                    item._correct_answer_payload = payloads[q.name][item.name]['payload']
+                    item.estimated_time = payloads[q.name][item.name].get("time", 1)
+                    q.estimated_time = payloads[q.name].get("time", 1)
+                    if "precomputed" in payloads[q.name][item.name]: # Consider removing later.
+                        item._precomputed_payload = payloads[q.name][item.name]['precomputed']
+                    try:
+                        if "title" in payloads[q.name][item.name]: # can perhaps be removed later.
+                            item.title = payloads[q.name][item.name]['title']
+                    except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).
+                        pass
+                        # print("bad", e)
+        self.payloads = payloads
+
+
+def rm_progress_bar(txt):
+    # The length of a tqdm bar depends on terminal settings, so detect progress-bar lines by the order of their symbols ('%' followed by two '|') rather than by exact content.
+    nlines = []
+    for l in txt.splitlines():
+        pct = l.find("%")
+        ql = False
+        if pct > 0:
+            i = l.find("|", pct+1)
+            if i > 0 and l.find("|", i+1) > 0:
+                ql = True
+        if not ql:
+            nlines.append(l)
+    return "\n".join(nlines)
+
+def extract_numbers(txt):
+    # txt = rm_progress_bar(txt)
+    numeric_const_pattern = '[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
+    rx = re.compile(numeric_const_pattern, re.VERBOSE)
+    matches = rx.findall(txt)
+    numbers = [float(a) if ('.' in a or 'e' in a.lower()) else int(a) for a in matches]
+    if len(numbers) > 500:
+        print(txt)
+        raise Exception("unitgrade_v1.unitgrade: Warning, text contains too many numbers:", len(numbers))
+    return numbers
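+
+# Example (illustrative):
+#   extract_numbers("epoch 2: loss 3.5e-2")  ->  [2, 0.035]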
+
+
+class ActiveProgress():
+    def __init__(self, t, start=True, title="my progress bar"):
+        self.t = t
+        self._running = False
+        self.title = title
+        self.dt = 0.1
+
+        self.n = int(np.round(self.t / self.dt))
+
+        if start:
+            self.start()
+
+    def start(self):
+        self._running = True
+        self.thread = threading.Thread(target=self.run)
+        self.thread.start()
+
+    def terminate(self):
+        self._running = False
+        self.thread.join()
+        if hasattr(self, 'pbar') and self.pbar is not None:
+            self.pbar.update(1)
+            self.pbar.close()
+            self.pbar=None
+
+        sys.stdout.flush()
+
+    def run(self):
+        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,
+                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')  # , unit_scale=dt, unit='seconds'):
+
+        for _ in range(self.n-1): # Don't terminate completely; leave bar at 99% done until terminate.
+            if not self._running:
+                self.pbar.close()
+                self.pbar = None
+                break
+
+            time.sleep(self.dt)
+            self.pbar.update(1)
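+
+# Minimal usage sketch (illustrative): show a bar sized to an estimated
+# duration while the real work runs, then stop it.
+#
+#   cc = ActiveProgress(t=2.0, title="Question 1")
+#   time.sleep(1.0)  # stand-in for the actual work
+#   cc.terminate()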
diff --git a/src/unitgrade/unitgrade_grade.py b/src/unitgrade_v1/unitgrade_grade.py
similarity index 100%
rename from src/unitgrade/unitgrade_grade.py
rename to src/unitgrade_v1/unitgrade_grade.py
diff --git a/src/unitgrade_v1/unitgrade_helpers.py b/src/unitgrade_v1/unitgrade_helpers.py
new file mode 100644
index 0000000..a9d8fc1
--- /dev/null
+++ b/src/unitgrade_v1/unitgrade_helpers.py
@@ -0,0 +1,268 @@
+import numpy as np
+from tabulate import tabulate
+from datetime import datetime
+import pyfiglet
+from unitgrade_v1 import Hidden, myround, msum, ActiveProgress
+# import unitgrade_v1
+
+# from unitgrade_v1.unitgrade_v1 import Hidden
+# import unitgrade_v1 as ug
+# import unitgrade_v1.unitgrade_v1 as ug
+import inspect
+import os
+import argparse
+import sys
+import time
+
+#from threading import Thread  # This import presents a problem for the minify-code compression tool.
+
+parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: 
+To run all tests in a report: 
+
+> python assignment1_dp.py
+
+To run only question 2 or question 2.1
+
+> python assignment1_dp.py -q 2
+> python assignment1_dp.py -q 2.1
+
+Note that this script does not grade your report. To grade your report, use:
+
+> python report1_grade.py
+
+Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
+For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to `Documents/` and run:
+
+> python -m course_package.report1
+
+see https://docs.python.org/3.9/using/cmdline.html
+""", formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)')
+parser.add_argument('--showexpected',  action="store_true",  help='Show the expected/desired result')
+parser.add_argument('--showcomputed',  action="store_true",  help='Show the answer your code computes')
+parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
+parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
+parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars.')
+
+def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
+    args = parser.parse_args()
+    if question is None and args.q is not None:
+        question = args.q
+        if "." in question:
+            question, qitem = [int(v) for v in question.split(".")]
+        else:
+            question = int(question)
+
+    if not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
+        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")
+
+    if unmute is None:
+        unmute = args.unmute
+    if passall is None:
+        passall = args.passall
+
+    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not args.noprogress, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,
+                                          show_tol_err=show_tol_err)
+
+    try:  # For registering stats.
+        import unitgrade_private_v1
+        import irlc.lectures
+        import xlwings
+        from openpyxl import Workbook
+        import pandas as pd
+        from collections import defaultdict
+        dd = defaultdict(lambda: [])
+        error_computed = []
+        for k1, (q, _) in enumerate(report.questions):
+            for k2, item in enumerate(q.items):
+                dd['question_index'].append(k1)
+                dd['item_index'].append(k2)
+                dd['question'].append(q.name)
+                dd['item'].append(item.name)
+                dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol)
+                error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed)
+
+        qstats = report.wdir + "/" + report.name + ".xlsx"
+
+        if os.path.isfile(qstats):
+            d_read = pd.read_excel(qstats).to_dict()
+        else:
+            d_read = dict()
+
+        for k in range(1000):
+            key = 'run_'+str(k)
+            if key in d_read:
+                dd[key] = list(d_read[key].values())
+            else:
+                dd[key] = error_computed
+                break
+
+        workbook = Workbook()
+        worksheet = workbook.active
+        for col, key in enumerate(dd.keys()):
+            worksheet.cell(row=1, column=col+1).value = key
+            for row, item in enumerate(dd[key]):
+                worksheet.cell(row=row+2, column=col+1).value = item
+
+        workbook.save(qstats)
+        workbook.close()
+
+    except ModuleNotFoundError:
+        pass  # Stats registration is optional; skip it when the private tooling is not installed.
+
+    if question is None:
+        print("Provisional evaluation")
+        print(tabulate(table_data))
+        print(" ")
+
+    fr = inspect.getouterframes(inspect.currentframe())[1].filename
+    gfile = os.path.basename(fr)[:-3] + "_grade.py"
+    if os.path.exists(gfile):
+        print("Note your results have not yet been registered. \nTo register your results, please run the file:")
+        print(">>>", gfile)
+        print("In the same manner as you ran this file.")
+    return results
+
+
+def upack(q):
+    h = [(i['w'], i['possible'], i['obtained']) for i in q.values()]
+    h = np.asarray(h)
+    return h[:, 0], h[:, 1], h[:, 2]
+
+
+
+def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
+                    show_progress_bar=True,
+                    show_tol_err=False):
+    from unitgrade_v1.version import __version__
+    now = datetime.now()
+    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
+    b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    print(b + " v" + __version__)
+    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
+    print("Started: " + dt_string)
+    s = report.title
+    if report.version is not None:
+        s += " version " + report.version
+    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")
+    print(f"Loaded answers from: ", report.computed_answers_file, "\n")
+    table_data = []
+    nL = 80
+    t_start = time.time()
+    score = {}
+    for n, (q, w) in enumerate(report.questions):
+        q_hidden = issubclass(q.__class__, Hidden)
+        if question is not None and n+1 != question:
+            continue
+        q_title_print = "Question %i: %s"%(n+1, q.title)
+        print(q_title_print, end="")
+        q.possible = 0
+        q.obtained = 0
+        q_ = {} # Gather score in this class.
+
+        q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
+
+        for j, item in enumerate(q.items):
+            if qitem is not None and question is not None and j+1 != qitem:
+                continue
+
+            if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.
+                # if not item.question.has_called_init_:
+                start = time.time()
+
+                cc = None
+                if show_progress_bar:
+                    total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )
+                    cc = ActiveProgress(t=total_estimated_time, title=q_title_print)
+                with eval('Capturing')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.
+                    try:
+                        for q2 in q_with_outstanding_init:
+                            q2.init()
+                            q2.has_called_init_ = True
+
+                        # item.question.init()  # Initialize the question. Useful for sharing resources.
+                    except Exception as e:
+                        if not passall:
+                            if not silent:
+                                print(" ")
+                                print("="*30)
+                                print(f"When initializing question {q.title} the initialization code threw an error")
+                                print(e)
+                                print("The remaining parts of this question will likely fail.")
+                                print("="*30)
+
+                if show_progress_bar:
+                    cc.terminate()
+                    sys.stdout.flush()
+                    print(q_title_print, end="")
+
+                # item.question.has_called_init_ = True
+                q_time = np.round(time.time() - start, 2)
+
+                print(" " * max(0, nL - len(q_title_print)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))
+                print("=" * nL)
+                q_with_outstanding_init = None
+
+            # item.question = q # Set the parent question instance for later reference.
+            item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)
+
+            if show_progress_bar:
+                cc = ActiveProgress(t=item.estimated_time, title=item_title_print)
+            else:
+                print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="")
+            hidden = issubclass(item.__class__, Hidden)
+            # if not hidden:
+            #     print(ss, end="")
+            # sys.stdout.flush()
+            start = time.time()
+
+            (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)
+            q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title}
+            tsecs = np.round(time.time()-start, 2)
+            if show_progress_bar:
+                cc.terminate()
+                sys.stdout.flush()
+                print(item_title_print + ('.' * max(0, nL - 4 - len(ss))), end="")
+
+            if not hidden:
+                ss = "PASS" if current == possible else "*** FAILED"
+                if tsecs >= 0.1:
+                    ss += " ("+ str(tsecs) + " seconds)"
+                print(ss)
+
+        ws, possible, obtained = upack(q_)
+        possible = int(ws @ possible)
+        obtained = int(ws @ obtained)
+        obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0
+        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'hidden': q_hidden, 'title': q.title}
+
+        q.obtained = obtained
+        q.possible = possible
+
+        s1 = f"*** Question q{n+1}"
+        s2 = f" {q.obtained}/{w}"
+        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )
+        print(" ")
+        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])
+
+    ws, possible, obtained = upack(score)
+    possible = int( msum(possible) )
+    obtained = int( msum(obtained) ) # Cast to python int
+    report.possible = possible
+    report.obtained = obtained
+    now = datetime.now()
+    dt_string = now.strftime("%H:%M:%S")
+
+    dt = int(time.time()-t_start)
+    minutes = dt//60
+    seconds = dt - minutes*60
+    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")
+
+    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")
+
+    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
+    results = {'total': (obtained, possible), 'details': score}
+    return results, table_data
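+
+# Typical entry point (illustrative): a report script defines a Report
+# subclass, e.g. Report1, and ends with
+#
+#   if __name__ == "__main__":
+#       evaluate_report_student(Report1())
+#
+# which parses the command line (-q, --unmute, --passall, ...) and prints the
+# per-question table produced above.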
diff --git a/src/unitgrade_v1/version.py b/src/unitgrade_v1/version.py
new file mode 100644
index 0000000..a68927d
--- /dev/null
+++ b/src/unitgrade_v1/version.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
\ No newline at end of file
-- 
GitLab