diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..335ea9d070ad1c319906aeff798584ded23c7387
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2018 The Python Packaging Authority
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 822df1c4cb8aa8b28c9a1031a6f20c66fafebee6..0000000000000000000000000000000000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-include *.dat
diff --git a/README.md b/README.md
index 460eea55cb2d14a0986eb73c86c36613d8b3c4c9..c1a3fc8db454a5e7cb1f1c2078ed4624361ba6a5 100644
--- a/README.md
+++ b/README.md
@@ -4,47 +4,35 @@ Unitgrade is an automatic report and exam evaluation framework that enables inst
 Unitgrade is built on Python's `unittest` framework, so tests can be specified in a familiar syntax and will integrate with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation) and an easy and 100% safe mechanism for verifying students' results and creating additional, hidden tests. A powerful cache system allows instructors to automatically create test answers based on a working solution. 
 
  - 100% Python `unittest` compatible
- - No external configuration files: Just write a `unittest`
- - No unnatural limitations: Use any package or framework. If you can `unittest` it, it works.   
+ - No external configuration files, just write a `unittest`
+ - No unnatural limitations: If you can `unittest` it, it works.   
  - Granular security model: 
     - Students get public `unittests` for easy development of solutions
     - Students get a tamper-resistant file to create submissions which are uploaded
-    - Instructors can automatically verify the students solution using a Docker VM and run hidden tests
+    - Instructors can automatically verify the student's solution using a Docker VM and by running hidden tests
+    - Allows export of assignments to Autolab (no `make` file mysteries!)
  - Tests are quick to run and will integrate with your IDE
 
 ## Installation
-Unitgrade can be installed through pip using 
+Unitgrade can be installed using `pip`:
 ```
-pip install git+https://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git
+pip install unitgrade
 ```
 This will install unitgrade in your site-packages directory. If you want to upgrade an old installation of unitgrade:
 ```
-pip install git+https://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade
+pip install unitgrade --upgrade
 ```
 If you are using Anaconda with a virtual environment, you can install it as:
 ```
 source activate myenv
 conda install git pip
-pip install git+https://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git
+pip install unitgrade
 ```
-Alternatively, simply use git-clone of the sources and add unitgrade to your python path.
 
 When you are done, you should be able to import unitgrade:
 ```
 import unitgrade
 ```
-## Testing installation
-I have provided an example project which illustrates all main features in a self-contained manner and which should 
-work immediately upon installation. The source can be found here: https://lab.compute.dtu.dk/tuhe/unitgrade/-/tree/master/cs101courseware_example
-To run the example, first start a python console:
-```
-python
-```
-Then run the code
-```
-from cs101courseware_example import instructions
-```
-This will print on-screen instructions for how to use the system tailored to your user-specific installation path.
 
 ## Evaluating a report
 Homework is broken down into **reports**. A report is a collection of questions which are individually scored, and each question may in turn involve multiple tests. Each report is therefore given an overall score based on a weighted average of how many tests are passed.
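+
+A minimal sketch of what such a report file might look like (the class and module names below are hypothetical):
+```
+from unitgrade2.unitgrade2 import Report, UTestCase
+from unitgrade2.unitgrade_helpers2 import evaluate_report
+
+class Week1(UTestCase):
+    """ Question 1: basic functions """
+    def test_add(self):
+        from homework1 import add          # the student's solution
+        self.assertEqualC(add(2, 2))       # compared against the cached reference answer
+
+class Report1(Report):
+    title = "CS 101 Report 1"
+    questions = [(Week1, 10)]              # (question class, weight)
+
+if __name__ == "__main__":
+    evaluate_report(Report1())
+```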
@@ -83,12 +71,12 @@ To register your results, please run the file:
 >>> cs101report1_grade.py
 In the same manner as you ran this file.
 ```
-Once you are happy with the result, run the alternative, not-easy-to-tamper-with script called `cs101report1_grade.py`:
+Once you are happy with the result, run the script with the `_grade.py` postfix, in this case `cs101report1_grade.py`:
 
 ```
 python cs101report1_grade.py
 ```
-This runs the same tests, and generates a file `Report0_handin_18_of_18.token`. The file name indicates how many points you got. Upload this file to campusnet.
+This runs the same tests and generates a file `Report0_handin_18_of_18.token`. The file name indicates how many points you got. Upload this file (and no other) to campusnet.
 
 ### Why are there two scripts?
 The reason we use both a standard test script and one with the `_grade.py` extension is that the tests should be easy to debug, while at the same time we must prevent accidental changes to the test scripts. Hence, we include two versions of the tests.
@@ -97,7 +85,7 @@ The reason why we use a standard test script, and one with the `_grade.py` exten
 - **My non-grade script and the `_grade.py` script give a different number of points**
Since the two scripts should contain the same code, the reason is almost certainly that you have made an (accidental) change to the test scripts. Please ensure both scripts are up to date and, if the problem persists, try to get support.
    
- - **Why is there a `*_resources_do_not_hand_in.dat` file? Should I also upload it?**
+ - **Why is there a `unitgrade` directory with a bunch of pickle files? Should I also upload them?**
 No. These files contain the pre-computed test results your code is compared against. If you want to load them manually, the unitgrade package contains helpful functions for doing so; a minimal sketch is shown below.
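+
+```
+import pickle
+with open("unitgrade/Week1.pkl", "rb") as f:   # hypothetical file name
+    cache = pickle.load(f)                     # a dict of pre-computed test answers
+print(cache.keys())
+```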
    
  - **I am worried you might think I cheated because I opened the '_grade.py' script/token file**
@@ -135,7 +123,7 @@ Yes.
 That the script `report1_grade.py` is difficult to read is not the principal safety measure. Instead, it ensures there is no accidental tampering. If you muck around with these files and upload the result, we will very likely know.
 
 - **I have private data on my computer. Will this be read or uploaded?**
-No. The code will look for and upload your solutions, but it will not read/look at other directories in your computer. In the example provided with this code, this means you should expect unitgrade to read/run all files in the `cs101courseware_example`-directory, but **no** other files on your computer (unless some code in this directory load other files). So as long as you keep your private files out of the base courseware directory, you should be fine. 
+No. The code will look for and upload your solutions, but it will not read/look at other directories on your computer. In the example provided with this code, this means you should expect unitgrade to read/run all files in the `cs101courseware_example` directory, but **no** other files on your computer. So as long as you keep your private files out of the base courseware directory, you should be fine.
 
 - **Does this code install any spyware/etc.? Does it communicate with a website/online service?**
 No. Unitgrade makes no changes outside the courseware directory, and it does not do anything tricky. It reads/runs code and writes the `.token` file.
diff --git a/cs101courseware_example/instructions.py b/cs101courseware_example/instructions.py
index b6e149be16a4ead2a4efbc9bf1f431851c31c74b..9786bd41cf05bf6227ea7df1f6fdb5700f11cfdb 100644
--- a/cs101courseware_example/instructions.py
+++ b/cs101courseware_example/instructions.py
@@ -18,7 +18,7 @@ for d in os.listdir(wdir):
     if "__" not in d and d != "instructions.py":
         print("> ", d)
 print("")
-fprint("The file homework1.py is the file you edit as part of the course; you are welcome to open it and inspect the content, but right now it consists of some simple programming tasks plus instructions.")
+fprint("The file looping.py is the file you edit as part of the course; you are welcome to open it and inspect the content, but right now it consists of some simple programming tasks plus instructions.")
 fprint("The file cs101report1.py contains the actual tests of the program. All the tests are easily readable and the script will work with your debugger if you are using pycharm, however, we will run the script for the command line. ")
 fprint("To do so, open a console, and change directory to the cs103 main directory using e.g.:")
 tprint(f'cd "{wdir}"')
diff --git a/dist/unitgrade-0.0.2-py3-none-any.whl b/dist/unitgrade-0.0.2-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..88df3c4dfc76af895eddef0e9e1a54c7a0cc5fc6
Binary files /dev/null and b/dist/unitgrade-0.0.2-py3-none-any.whl differ
diff --git a/dist/unitgrade-0.0.2.tar.gz b/dist/unitgrade-0.0.2.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..24bc625650b200d9016cc5ada901d05c91e0706f
Binary files /dev/null and b/dist/unitgrade-0.0.2.tar.gz differ
diff --git a/dist/unitgrade-0.0.5.tar.gz b/dist/unitgrade-0.0.5.tar.gz
deleted file mode 100644
index debd32cec15ab13d6feb708942d2a2a9f1400e1a..0000000000000000000000000000000000000000
Binary files a/dist/unitgrade-0.0.5.tar.gz and /dev/null differ
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..b5a3c468d9e85e7fa7469c3a90d47b48ab93e54a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+    "setuptools>=42",
+    "wheel"
+]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 29108c8721da4c0e63c8911a5f00825bcb59e976..aaa1240691889b816646206a762bc575d185d5e7 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,35 @@
-from setuptools import setup
-from unitgrade.version import __version__
-setup(
-    name='unitgrade',
+# Use this guide:
+# https://packaging.python.org/tutorials/packaging-projects/
+
+# from unitgrade2.version import __version__
+import setuptools
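+# Read the version by parsing src/unitgrade2/version.py (a single line: __version__ = "x.y.z") rather than importing the package: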
+with open("src/unitgrade2/version.py", "r", encoding="utf-8") as fh:
+    __version__ = fh.read().split(" = ")[1].strip()[1:-1]
+
+with open("README.md", "r", encoding="utf-8") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="unitgrade",
     version=__version__,
-    packages=['unitgrade', 'cs101courseware_example'],
+    author="Tue Herlau",
+    author_email="tuhe@dtu.dk",
+    description="A student homework/exam evaluation framework build on pythons unittest framework.",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
     url='https://lab.compute.dtu.dk/tuhe/unitgrade',
-    license='Apache',
-    author='Tue Herlau',
-    author_email='tuhe@dtu.dk',
-    description='A lightweight student evaluation framework build on unittest',
-    include_package_data=True,
-    install_requires=['numpy', 'jinja2', 'tabulate', 'sklearn', 'compress_pickle', "pyfiglet"],
+    project_urls={
+        "Bug Tracker": "https://lab.compute.dtu.dk/tuhe/unitgrade/issues",
+    },
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    package_dir={"": "src"},
+    packages=setuptools.find_packages(where="src"),
+    python_requires=">=3.8",
+    license="MIT",
+    install_requires=['numpy', 'tabulate', 'tqdm', "pyfiglet", "colorama", "coverage"],
 )
diff --git a/unitgrade2/__init__.py b/src/unitgrade2/__init__.py
similarity index 90%
rename from unitgrade2/__init__.py
rename to src/unitgrade2/__init__.py
index 23b1c9d945462ead9ebec34c1c4da50e8906cf24..d62d54decd33ce4cefbb5f107f0b988d4a5266af 100644
--- a/unitgrade2/__init__.py
+++ b/src/unitgrade2/__init__.py
@@ -1,4 +1,3 @@
-from unitgrade2.version import __version__
 import os
 
 # Don't import stuff here since the install script requires __version__
@@ -34,4 +33,4 @@ def cache_read(file_name):
     else:
         return None
 
-from unitgrade2.unitgrade2 import Hidden, myround, mfloor, msum, Capturing, ActiveProgress
+from unitgrade2.unitgrade2 import myround, mfloor, msum, Capturing, ActiveProgress
diff --git a/src/unitgrade2/__pycache__/__init__.cpython-38.pyc b/src/unitgrade2/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4429c41fb31c60f6c1074d20620caf2572fce98e
Binary files /dev/null and b/src/unitgrade2/__pycache__/__init__.cpython-38.pyc differ
diff --git a/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc b/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f772f63e4a0c6727d15b104e6cf981814c53043
Binary files /dev/null and b/src/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc differ
diff --git a/src/unitgrade2/unitgrade2.py b/src/unitgrade2/unitgrade2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0290ca25ba2a0db9faa00ce5b69b842e0e57ea6
--- /dev/null
+++ b/src/unitgrade2/unitgrade2.py
@@ -0,0 +1,705 @@
+"""
+git add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade
+"""
+import numpy as np
+import sys
+import re
+import threading
+import tqdm
+import pickle
+import os
+from io import StringIO
+import io
+from unittest.runner import _WritelnDecorator
+from typing import Any
+import inspect
+import textwrap
+import colorama
+from colorama import Fore
+from functools import _make_key, RLock
+from collections import namedtuple
+import unittest
+import time
+
+_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
+
+colorama.init(autoreset=True)  # auto resets your settings after every output
+
+def gprint(s):
+    print(f"{Fore.GREEN}{s}")
+
+myround = lambda x: np.round(x)  # required.
+msum = lambda x: sum(x)
+mfloor = lambda x: np.floor(x)
+
+
+def setup_dir_by_class(C, base_dir):
+    name = C.__class__.__name__
+    return base_dir, name
+
+
+class Logger(object):
+    def __init__(self, buffer):
+        self.terminal = sys.stdout
+        self.log = buffer
+
+    def write(self, message):
+        self.terminal.write(message)
+        self.log.write(message)
+
+    def flush(self):
+        # this flush method is needed for python 3 compatibility.
+        pass
+
+
+class Capturing(list):
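+    """ Context manager that captures everything printed to stdout inside a with-block as a list of lines. """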
+    def __init__(self, *args, stdout=None, unmute=False, **kwargs):
+        self._stdout = stdout
+        self.unmute = unmute
+        super().__init__(*args, **kwargs)
+
+    def __enter__(self, capture_errors=True):  # don't put arguments here.
+        self._stdout = sys.stdout if self._stdout is None else self._stdout
+        self._stringio = StringIO()
+        if self.unmute:
+            sys.stdout = Logger(self._stringio)
+        else:
+            sys.stdout = self._stringio
+
+        if capture_errors:
+            self._sterr = sys.stderr
+            sys.stderr = StringIO()  # memory hole it
+        self.capture_errors = capture_errors
+        return self
+
+    def __exit__(self, *args):
+        self.extend(self._stringio.getvalue().splitlines())
+        del self._stringio  # free up some memory
+        sys.stdout = self._stdout
+        if self.capture_errors:
+            sys.stderr = self._sterr
+
+
+class Capturing2(Capturing):
+    def __exit__(self, *args):
+        lines = self._stringio.getvalue().splitlines()
+        txt = "\n".join(lines)
+        numbers = extract_numbers(txt)
+        self.extend(lines)
+        del self._stringio  # free up some memory
+        sys.stdout = self._stdout
+        if self.capture_errors:
+            sys.stderr = self._sterr
+
+        self.output = txt
+        self.numbers = numbers
+
+
+# @classmethod
+# class OrderedClassMembers(type):
+#     def __prepare__(self, name, bases):
+#         assert False
+#         return collections.OrderedDict()
+#
+#     def __new__(self, name, bases, classdict):
+#         ks = list(classdict.keys())
+#         for b in bases:
+#             ks += b.__ordered__
+#         classdict['__ordered__'] = [key for key in ks if key not in ('__module__', '__qualname__')]
+#         return type.__new__(self, name, bases, classdict)
+
+
+class Report:
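+    """ Base class for a report: an ordered collection of (question class, weight) pairs that are evaluated together. """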
+    title = "report title"
+    version = None
+    questions = []
+    pack_imports = []
+    individual_imports = []
+    nL = 120  # Maximum line width
+
+    @classmethod
+    def reset(cls):
+        for (q, _) in cls.questions:
+            if hasattr(q, 'reset'):
+                q.reset()
+
+    @classmethod
+    def mfile(cls):
+        return inspect.getfile(cls)
+
+    def _file(self):
+        return inspect.getfile(type(self))
+
+    def _import_base_relative(self):
+        if hasattr(self.pack_imports[0], '__path__'):
+            root_dir = self.pack_imports[0].__path__._path[0]
+        else:
+            root_dir = self.pack_imports[0].__file__
+
+        root_dir = os.path.dirname(root_dir)
+        relative_path = os.path.relpath(self._file(), root_dir)
+        modules = os.path.normpath(relative_path[:-3]).split(os.sep)
+        return root_dir, relative_path, modules
+
+    def __init__(self, strict=False, payload=None):
+        working_directory = os.path.abspath(os.path.dirname(self._file()))
+        self.wdir, self.name = setup_dir_by_class(self, working_directory)
+        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")
+        for (q, _) in self.questions:
+            q.nL = self.nL  # Set maximum line length.
+
+        if payload is not None:
+            self.set_payload(payload, strict=strict)
+
+    def main(self, verbosity=1):
+        # Run all tests using standard unittest (nothing fancy).
+        loader = unittest.TestLoader()
+        for q, _ in self.questions:
+            start = time.time()  # Measure how long the question takes to run (setup + tests).
+            suite = loader.loadTestsFromTestCase(q)
+            unittest.TextTestRunner(verbosity=verbosity).run(suite)
+            total = time.time() - start
+            q.time = total
+
+    def _setup_answers(self, with_coverage=False):
+        if with_coverage:
+            for q, _ in self.questions:
+                q._with_coverage = True
+                q._report = self
+
+        self.main()  # Run all tests in class just to get that out of the way...
+        report_cache = {}
+        for q, _ in self.questions:
+            # print(self.questions)
+            if hasattr(q, '_save_cache'):
+                q()._save_cache()
+                q()._cache_put('time', q.time)
+                report_cache[q.__qualname__] = q._cache2
+                report_cache[q.__qualname__] = q._cache2
+            else:
+                report_cache[q.__qualname__] = {'no cache see _setup_answers in unitgrade2.py': True}
+        if with_coverage:
+            for q, _ in self.questions:
+                q._with_coverage = False
+        return report_cache
+
+    def set_payload(self, payloads, strict=False):
+        for q, _ in self.questions:
+            q._cache = payloads[q.__qualname__]
+
+
+def rm_progress_bar(txt):
+    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.
+    nlines = []
+    for l in txt.splitlines():
+        pct = l.find("%")
+        ql = False
+        if pct > 0:
+            i = l.find("|", pct + 1)
+            if i > 0 and l.find("|", i + 1) > 0:
+                ql = True
+        if not ql:
+            nlines.append(l)
+    return "\n".join(nlines)
+
+
+def extract_numbers(txt):
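+    """ Return all numeric literals found in txt as ints/floats, in order of appearance. """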
+    # txt = rm_progress_bar(txt)
+    numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
+    rx = re.compile(numeric_const_pattern, re.VERBOSE)
+    matches = rx.findall(txt)
+    matches = [float(a) if ('.' in a or "e" in a) else int(a) for a in matches]
+    if len(matches) > 500:
+        print(txt)
+        raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(matches))
+    return matches
+
+
+class ActiveProgress():
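+    """ Display a tqdm progress bar on a background thread for an estimated duration of t seconds. """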
+    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):
+        if file is None:
+            file = sys.stdout
+        self.file = file
+        self.t = t
+        self._running = False
+        self.title = title
+        self.dt = 0.01
+        self.n = int(np.round(self.t / self.dt))
+        self.show_progress_bar = show_progress_bar
+        self.pbar = None
+
+        if start:
+            self.start()
+
+    def start(self):
+        self._running = True
+        if self.show_progress_bar:
+            self.thread = threading.Thread(target=self.run)
+            self.thread.start()
+        self.time_started = time.time()
+
+    def terminate(self):
+        if not self._running:
+            raise Exception("Stopping a stopped progress bar. ")
+        self._running = False
+        if self.show_progress_bar:
+            self.thread.join()
+        if self.pbar is not None:
+            self.pbar.update(1)
+            self.pbar.close()
+            self.pbar = None
+
+        self.file.flush()
+        return time.time() - self.time_started
+
+    def run(self):
+        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,
+                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')
+
+        for _ in range(self.n - 1):  # Don't terminate completely; leave bar at 99% done until terminate.
+            if not self._running:
+                self.pbar.close()
+                self.pbar = None
+                break
+
+            time.sleep(self.dt)
+            self.pbar.update(1)
+
+def dprint(first, last, nL, extra = "", file=None, dotsym='.', color='white'):
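+    """ Print first and last on one line, separated by dotsym padding so the total width is nL characters. """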
+    if file is None:
+        file = sys.stdout
+    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))
+    print(first + dot_parts, end="", file=file)
+    last += extra
+    print(last, file=file)
+
+
+class UTextResult(unittest.TextTestResult):
+    nL = 80
+    number = -1  # Hacky way to set the question number.
+    show_progress_bar = True
+    cc = None
+
+    def __init__(self, stream, descriptions, verbosity):
+        super().__init__(stream, descriptions, verbosity)
+        self.successes = []
+
+    def printErrors(self) -> None:
+        self.printErrorList('ERROR', self.errors)
+        self.printErrorList('FAIL', self.failures)
+
+    def addError(self, test, err):
+        super(unittest.TextTestResult, self).addError(test, err)
+        self.cc_terminate(success=False)
+
+    def addFailure(self, test, err):
+        super(unittest.TextTestResult, self).addFailure(test, err)
+        self.cc_terminate(success=False)
+
+    def addSuccess(self, test: unittest.case.TestCase) -> None:
+        self.successes.append(test)
+        self.cc_terminate()
+
+    def cc_terminate(self, success=True):
+        tsecs = np.round(self.cc.terminate(), 2)
+        self.cc.file.flush()
+        ss = self.item_title_print
+
+        state = "PASS" if success else "FAILED"
+
+        dot_parts = ('.' * max(0, self.nL - len(state) - len(ss)))
+        print(self.item_title_print + dot_parts, end="", file=self.cc.file)
+
+        if tsecs >= 0.5:
+            state += " (" + str(tsecs) + " seconds)"
+        print(state, file=self.cc.file)
+
+    def startTest(self, test):
+        self.testsRun += 1
+        item_title = test.shortDescription()  # Better for printing (get from cache).
+        if item_title is None:
+            # For unittest framework where getDescription may return None.
+            item_title = self.getDescription(test)
+        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)
+        estimated_time = 10
+        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)
+
+        self._test = test
+        self._stdout = sys.stdout
+        sys.stdout = io.StringIO()
+
+    def stopTest(self, test):
+        sys.stdout = self._stdout
+        super().stopTest(test)
+
+    def _setupStdout(self):
+        if self._previousTestClass is None:
+            total_estimated_time = 1
+            if hasattr(self.__class__, 'q_title_print'):
+                q_title_print = self.__class__.q_title_print
+            else:
+                q_title_print = "<unnamed test. See unitgrade.py>"
+
+            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)
+            self.cc = cc
+
+    def _restoreStdout(self):  # Used when setting up the test.
+        if self._previousTestClass is None:
+            q_time = self.cc.terminate()
+            q_time = np.round(q_time, 2)
+            sys.stdout.flush()
+            if self.show_progress_bar:
+                print(self.cc.title, end="")
+            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))
+
+
+class UTextTestRunner(unittest.TextTestRunner):
+    def __init__(self, *args, **kwargs):
+        stream = io.StringIO()
+        super().__init__(*args, stream=stream, **kwargs)
+
+    def _makeResult(self):
+        # stream = self.stream # not you!
+        stream = sys.stdout
+        stream = _WritelnDecorator(stream)
+        return self.resultclass(stream, self.descriptions, self.verbosity)
+
+
+def cache(foo, typed=False):
+    """ Magic cache wrapper
+    https://github.com/python/cpython/blob/main/Lib/functools.py
+    """
+    maxsize = None
+    def wrapper(self, *args, **kwargs):
+        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))
+        if not self._cache_contains(key):
+            value = foo(self, *args, **kwargs)
+            self._cache_put(key, value)
+        else:
+            value = self._cache_get(key)
+        return value
+
+    return wrapper
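+
+# Hypothetical usage sketch: decorating an expensive helper method on a UTestCase
+# stores its return value in the report cache, keyed by the test's cache_id:
+#
+#   class Question(UTestCase):
+#       @cache
+#       def simulate(self, n):
+#           ...  # computed once by the instructor, replayed from the cache thereafter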
+
+
+def get_hints(ss):
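+    """ Extract the hint text that follows a 'hints:' marker in a test's docstring; returns None on failure. """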
+    if ss is None:
+        return None
+    try:
+        ss = textwrap.dedent(ss)
+        ss = ss.replace('''"""''', "").strip()
+        hints = ["hints:", ]
+        j = np.argmax([ss.lower().find(h) for h in hints])
+        h = hints[j]
+        ss = ss[ss.find(h) + len(h) + 1:]
+        ss = "\n".join([l for l in ss.split("\n") if not l.strip().startswith(":")])
+        ss = textwrap.dedent(ss)
+        ss = ss.strip()
+        return ss
+    except Exception as e:
+        print("bad hints", ss, e)
+
+
+class UTestCase(unittest.TestCase):
+    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.
+    _cache = None  # Read-only cache. Ensures method always produce same result.
+    _cache2 = None  # User-written cache.
+    _with_coverage = False
+    _report = None  # The report used. This is very, very hacky and should always be None. Don't rely on it!
+
+    def capture(self):
+        if hasattr(self, '_stdout') and self._stdout is not None:
+            file = self._stdout
+        else:
+            # self._stdout = sys.stdout
+            # sys._stdout = io.StringIO()
+            file = sys.stdout
+        return Capturing2(stdout=file)
+
+    @classmethod
+    def question_title(cls):
+        """ Return the question title """
+        return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__
+
+    @classmethod
+    def reset(cls):
+        print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")
+        cls._outcome = None
+        cls._cache = None
+        cls._cache2 = None
+
+    def _callSetUp(self):
+        if self._with_coverage:
+            if not hasattr(self._report, 'covcache'):
+                self._report.covcache = {}
+            import coverage
+            self.cov = coverage.Coverage()
+            self.cov.start()
+        self.setUp()
+
+    def _callTearDown(self):
+        self.tearDown()
+        if self._with_coverage:
+            from pathlib import Path
+            from snipper import snipper
+            self.cov.stop()
+            data = self.cov.get_data()
+            base, _, _ = self._report._import_base_relative()
+            for file in data.measured_files():
+                file = os.path.normpath(file)
+                root = Path(base)
+                child = Path(file)
+                if root in child.parents:
+                    with open(child, 'r') as f:
+                        s = f.read()
+                    lines = s.splitlines()
+                    garb = 'GARBAGE'
+
+                    lines2 = snipper.censor_code(lines, keep=True)
+                    assert len(lines) == len(lines2)
+
+                    for l in data.contexts_by_lineno(file):
+                        if lines2[l].strip() == garb:
+                            if self.cache_id() not in self._report.covcache:
+                                self._report.covcache[self.cache_id()] = {}
+
+                            rel = os.path.relpath(child, root)
+                            cc = self._report.covcache[self.cache_id()]
+                            j = 0
+                            for j in range(l, -1, -1):
+                                if "def" in lines2[j] or "class" in lines2[j]:
+                                    break
+                            from snipper.snipper import gcoms
+                            fun = lines2[j]
+                            comments, _ = gcoms("\n".join(lines2[j:l]))
+                            if rel not in cc:
+                                cc[rel] = {}
+                            cc[rel][fun] = (l, "\n".join(comments))
+                            self._cache_put((self.cache_id(), 'coverage'), self._report.covcache)
+
+    def shortDescriptionStandard(self):
+        sd = super().shortDescription()
+        if sd is None:
+            sd = self._testMethodName
+        return sd
+
+    def shortDescription(self):
+        sd = self.shortDescriptionStandard()
+        title = self._cache_get((self.cache_id(), 'title'), sd)
+        return title if title is not None else sd
+
+    @property
+    def title(self):
+        return self.shortDescription()
+
+    @title.setter
+    def title(self, value):
+        self._cache_put((self.cache_id(), 'title'), value)
+
+    def _get_outcome(self):
+        if not hasattr(self.__class__, '_outcome') or self.__class__._outcome is None:
+            self.__class__._outcome = {}
+        return self.__class__._outcome
+
+    def _callTestMethod(self, testMethod):
+        t = time.time()
+        self._ensure_cache_exists()  # Make sure cache is there.
+        if self._testMethodDoc is not None:
+            self._cache_put((self.cache_id(), 'title'), self.shortDescriptionStandard())
+
+        self._cache2[(self.cache_id(), 'assert')] = {}
+        res = testMethod()
+        elapsed = time.time() - t
+        self._get_outcome()[self.cache_id()] = res
+        self._cache_put((self.cache_id(), "time"), elapsed)
+
+    def cache_id(self):
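+        """ The (class name, test method name) pair used as this test's key into the cache. """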
+        c = self.__class__.__qualname__
+        m = self._testMethodName
+        return c, m
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._load_cache()
+        self._assert_cache_index = 0
+
+    def _ensure_cache_exists(self):
+        if not hasattr(self.__class__, '_cache') or self.__class__._cache is None:
+            self.__class__._cache = dict()
+        if not hasattr(self.__class__, '_cache2') or self.__class__._cache2 is None:
+            self.__class__._cache2 = dict()
+
+    def _cache_get(self, key, default=None):
+        self._ensure_cache_exists()
+        return self.__class__._cache.get(key, default)
+
+    def _cache_put(self, key, value):
+        self._ensure_cache_exists()
+        self.__class__._cache2[key] = value
+
+    def _cache_contains(self, key):
+        self._ensure_cache_exists()
+        return key in self.__class__._cache
+
+    def wrap_assert(self, assert_fun, first, *args, **kwargs):
+        # sys.stdout = self._stdout
+        key = (self.cache_id(), 'assert')
+        if not self._cache_contains(key):
+            print("Warning, framework missing", key)
+            self.__class__._cache[key] = {}  # A new dict; we insert it manually because we rely on the dict being mutable.
+        cache = self._cache_get(key)
+        idx = self._assert_cache_index
+        if idx not in cache:
+            print("Warning, framework missing cache index", key, "id =", idx)
+        _expected = cache.get(idx, f"Key {idx} not found in cache; framework files missing. Please run deploy()")
+
+        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.
+        cache[idx] = first
+        self._cache_put(key, cache)
+        self._assert_cache_index += 1
+        assert_fun(first, _expected, *args, **kwargs)
+
+    def assertEqualC(self, first: Any, msg: Any = ...) -> None:
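+        """ Like assertEqual, but the expected value is read from (and recorded into) the report cache. """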
+        self.wrap_assert(self.assertEqual, first, msg)
+
+    def _cache_file(self):
+        return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"
+
+    def _save_cache(self):
+        # get the class name (i.e. what to save to).
+        cfile = self._cache_file()
+        if not os.path.isdir(os.path.dirname(cfile)):
+            os.makedirs(os.path.dirname(cfile))
+
+        if hasattr(self.__class__, '_cache2'):
+            with open(cfile, 'wb') as f:
+                pickle.dump(self.__class__._cache2, f)
+
+    # But you can also set cache explicitly.
+    def _load_cache(self):
+        if self._cache is not None:  # Cache already loaded. We will not load it twice.
+            return
+            # raise Exception("Loaded cache which was already set. What is going on?!")
+        cfile = self._cache_file()
+        if os.path.exists(cfile):
+            try:
+                with open(cfile, 'rb') as f:
+                    data = pickle.load(f)
+                self.__class__._cache = data
+            except Exception as e:
+                print("Bad cache", cfile)
+                print(e)
+        else:
+            print("Warning! data file not found", cfile)
+
+    def _feedErrorsToResult(self, result, errors):
+        """ Use this to show hints on test failure. """
+        if not isinstance(result, UTextResult):
+            er = [e for e, v in errors if v is not None]
+
+            if len(er) > 0:
+                hints = []
+                key = (self.cache_id(), 'coverage')
+                if self._cache_contains(key):
+                    CC = self._cache_get(key)
+                    for id in CC:
+                        if id == self.cache_id():
+                            cl, m = id
+                            gprint(f"> An error occured while solving: {cl}.{m}. The files/methods you need to edit are:")  # For the test {id} in {file} you should edit:")
+                            for file in CC[id]:
+                                rec = CC[id][file]
+                                gprint(f">   * {file}")
+                                for l in rec:
+                                    _, comments = CC[id][file][l]
+                                    hint = get_hints(comments)
+
+                                    if hint != None:
+                                        hints.append(hint)
+                                    gprint(f">      - {l}")
+
+                er = er[0]
+                doc = er._testMethodDoc
+                if doc is not None:
+                    hint = get_hints(er._testMethodDoc)
+                    if hint is not None:
+                        hints = [hint] + hints
+                if len(hints) > 0:
+                    gprint("> Hints:")
+                    gprint(textwrap.indent("\n".join(hints), ">   "))
+
+        super()._feedErrorsToResult(result, errors)
+
+
+def hide(func):
+    return func
+
+
+def makeRegisteringDecorator(foreignDecorator):
+    """
+        Returns a copy of foreignDecorator, which is identical in every
+        way(*), except also appends a .decorator property to the callable it
+        spits out.
+    """
+
+    def newDecorator(func):
+        # Call to newDecorator(method)
+        # Exactly like old decorator, but output keeps track of what decorated it
+        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done
+        R.decorator = newDecorator  # keep track of decorator
+        # R.original = func         # might as well keep track of everything!
+        return R
+
+    newDecorator.__name__ = foreignDecorator.__name__
+    newDecorator.__doc__ = foreignDecorator.__doc__
+    return newDecorator
+
+hide = makeRegisteringDecorator(hide)
+
+def methodsWithDecorator(cls, decorator):
+    """
+        Returns all methods in CLS with DECORATOR as the
+        outermost decorator.
+
+        DECORATOR must be a "registering decorator"; one
+        can make any decorator "registering" via the
+        makeRegisteringDecorator function.
+
+        import inspect
+        ls = list(methodsWithDecorator(GeneratorQuestion, deco))
+        for f in ls:
+            print(inspect.getsourcelines(f) ) # How to get all hidden questions.
+    """
+    for maybeDecorated in cls.__dict__.values():
+        if hasattr(maybeDecorated, 'decorator'):
+            if maybeDecorated.decorator == decorator:
+                yield maybeDecorated
diff --git a/unitgrade2/unitgrade_helpers2.py b/src/unitgrade2/unitgrade_helpers2.py
similarity index 84%
rename from unitgrade2/unitgrade_helpers2.py
rename to src/unitgrade2/unitgrade_helpers2.py
index 84215620b4a2ee0821eb19d6da4c6f7cb527f7af..d007fd2eab771a2c71add85583c5f067adcee3eb 100644
--- a/unitgrade2/unitgrade_helpers2.py
+++ b/src/unitgrade2/unitgrade_helpers2.py
@@ -2,19 +2,13 @@ import numpy as np
 from tabulate import tabulate
 from datetime import datetime
 import pyfiglet
-from unitgrade2 import Hidden, myround, msum, mfloor, ActiveProgress
-from unitgrade2 import __version__
+from unitgrade2 import msum
 import unittest
-# from unitgrade2.unitgrade2 import MySuite
 from unitgrade2.unitgrade2 import UTextResult
-
 import inspect
 import os
 import argparse
-import sys
 import time
-import threading # don't import Thread bc. of minify issue.
-import tqdm # don't do from tqdm import tqdm because of minify-issue
 
 parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: 
 To run all tests in a report: 
@@ -109,31 +103,27 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
                     show_tol_err=False,
                     big_header=True):
 
-    from unitgrade2.version import __version__
+    from unitgrade2.version import __version__
     now = datetime.now()
     if big_header:
         ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
         b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
     else:
         b = "Unitgrade"
-    print(b + " v" + __version__)
     dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
-    print("Started: " + dt_string)
+    print(b + " v" + __version__ + ", started: " + dt_string+ "\n")
+    # print("Started: " + dt_string)
     s = report.title
     if hasattr(report, "version") and report.version is not None:
         s += " version " + report.version
-    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")
+    print(s, "(use --help for options)" if show_help_flag else "")
     # print(f"Loaded answers from: ", report.computed_answers_file, "\n")
     table_data = []
-    nL = 80
     t_start = time.time()
     score = {}
     loader = SequentialTestLoader()
 
     for n, (q, w) in enumerate(report.questions):
-        # q = q()
-        # q_hidden = False
-        # q_hidden = issubclass(q.__class__, Hidden)
         if question is not None and n+1 != question:
             continue
         suite = loader.loadTestsFromTestCase(q)
@@ -143,12 +133,11 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         q.possible = 0
         q.obtained = 0
         q_ = {} # Gather score in this class.
-        from unitgrade2.unitgrade2 import UTextTestRunner
-        # unittest.Te
-        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
+        from unitgrade2.unitgrade2 import UTextTestRunner
         UTextResult.q_title_print = q_title_print # Hacky
         UTextResult.show_progress_bar = show_progress_bar # Hacky.
         UTextResult.number = n
+        UTextResult.nL = report.nL
 
         res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
 
@@ -157,20 +146,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
 
         assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun
 
-        # possible = int(ws @ possible)
-        # obtained = int(ws @ obtained)
-        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0
-
         obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
         score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle}
         q.obtained = obtained
         q.possible = possible
 
-        s1 = f"*** Question q{n+1}"
+        s1 = f" * q{n+1})   Total"
         s2 = f" {q.obtained}/{w}"
-        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )
+        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )
         print(" ")
-        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])
+        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])
 
     ws, possible, obtained = upack(score)
     possible = int( msum(possible) )
@@ -185,10 +170,12 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
     seconds = dt - minutes*60
     plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")
 
-    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")
+    from unitgrade2.unitgrade2 import dprint
+    dprint(first="Total points at " + dt_string + " (" + plrl(minutes, "minute") + ", " + plrl(seconds, "second") + ")",
+           last=str(report.obtained) + "/" + str(report.possible), nL=report.nL)
 
     table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
     results = {'total': (obtained, possible), 'details': score}
     return results, table_data
-
-
diff --git a/src/unitgrade2/version.py b/src/unitgrade2/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0235ce508e8df790674398c102bb5b4a0ca7fa0
--- /dev/null
+++ b/src/unitgrade2/version.py
@@ -0,0 +1 @@
+__version__ = "0.0.2"
\ No newline at end of file
diff --git a/unitgrade.egg-info/PKG-INFO b/unitgrade.egg-info/PKG-INFO
deleted file mode 100644
index cc1c911f9637b93b3090729858660aefac67684e..0000000000000000000000000000000000000000
--- a/unitgrade.egg-info/PKG-INFO
+++ /dev/null
@@ -1,10 +0,0 @@
-Metadata-Version: 1.0
-Name: unitgrade
-Version: 0.0.5
-Summary: A lightweight student evaluation framework build on unittest
-Home-page: https://lab.compute.dtu.dk/tuhe/unitgrade
-Author: Tue Herlau
-Author-email: tuhe@dtu.dk
-License: Apache
-Description: UNKNOWN
-Platform: UNKNOWN
diff --git a/unitgrade.egg-info/SOURCES.txt b/unitgrade.egg-info/SOURCES.txt
deleted file mode 100644
index 9026e790b0b95cfab15eedfebe87e5688e4987ac..0000000000000000000000000000000000000000
--- a/unitgrade.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MANIFEST.in
-README.md
-setup.py
-cs101courseware_example/Report0_resources_do_not_hand_in.dat
-cs101courseware_example/Report1_resources_do_not_hand_in.dat
-cs101courseware_example/Report2_resources_do_not_hand_in.dat
-cs101courseware_example/__init__.py
-cs101courseware_example/cs101report1.py
-cs101courseware_example/cs101report1_grade.py
-cs101courseware_example/cs101report2.py
-cs101courseware_example/cs101report2_grade.py
-cs101courseware_example/homework1.py
-cs101courseware_example/instructions.py
-unitgrade/__init__.py
-unitgrade/unitgrade.py
-unitgrade/unitgrade_helpers.py
-unitgrade.egg-info/PKG-INFO
-unitgrade.egg-info/SOURCES.txt
-unitgrade.egg-info/dependency_links.txt
-unitgrade.egg-info/requires.txt
-unitgrade.egg-info/top_level.txt
\ No newline at end of file
diff --git a/unitgrade.egg-info/dependency_links.txt b/unitgrade.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/unitgrade.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/unitgrade.egg-info/requires.txt b/unitgrade.egg-info/requires.txt
deleted file mode 100644
index b8b3f8562c3026c448236d6db810823983abe8f0..0000000000000000000000000000000000000000
--- a/unitgrade.egg-info/requires.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-jinja2
-tabulate
-sklearn
-compress_pickle
diff --git a/unitgrade.egg-info/top_level.txt b/unitgrade.egg-info/top_level.txt
deleted file mode 100644
index 5f808ad416acfad86173979aead2d15282004568..0000000000000000000000000000000000000000
--- a/unitgrade.egg-info/top_level.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-cs101courseware_example
-unitgrade
diff --git a/unitgrade/Report_resources_do_not_hand_in.dat b/unitgrade/Report_resources_do_not_hand_in.dat
deleted file mode 100644
index 9c15fa98decdca1da9d38c1989086f8c4c1ce960..0000000000000000000000000000000000000000
Binary files a/unitgrade/Report_resources_do_not_hand_in.dat and /dev/null differ
diff --git a/unitgrade/version.py b/unitgrade/version.py
index a23ef3f48b85172c0ab83ad6a8b4cea4fd584c04..a68927d6ca950577d845cea16247b0aee681c39f 100644
--- a/unitgrade/version.py
+++ b/unitgrade/version.py
@@ -1 +1 @@
-__version__ = "0.1.8"
\ No newline at end of file
+__version__ = "0.1.0"
\ No newline at end of file
diff --git a/unitgrade2/__pycache__/__init__.cpython-38.pyc b/unitgrade2/__pycache__/__init__.cpython-38.pyc
deleted file mode 100644
index ac7b672193da207e651f61b4bee3e1791e27a656..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/__init__.cpython-38.pyc and /dev/null differ
diff --git a/unitgrade2/__pycache__/__init__.cpython-39.pyc b/unitgrade2/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index e61f9803216fe17bf58c02cb47dd2fafcfca4760..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/__init__.cpython-39.pyc and /dev/null differ
diff --git a/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc b/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc
deleted file mode 100644
index 3492c9eca22889350f2a5707f0579dc18533a9c0..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/unitgrade2.cpython-38.pyc and /dev/null differ
diff --git a/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc b/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc
deleted file mode 100644
index efd8ce6f58cc6a9f0d3cd6104d4cbe6e9982131d..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/unitgrade_helpers2.cpython-38.pyc and /dev/null differ
diff --git a/unitgrade2/__pycache__/version.cpython-38.pyc b/unitgrade2/__pycache__/version.cpython-38.pyc
deleted file mode 100644
index 1e804a3c2f2261406205db1c73302a4acb70b698..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/version.cpython-38.pyc and /dev/null differ
diff --git a/unitgrade2/__pycache__/version.cpython-39.pyc b/unitgrade2/__pycache__/version.cpython-39.pyc
deleted file mode 100644
index 613bef2848d43d5e78803c81d3d366c27a5e2c62..0000000000000000000000000000000000000000
Binary files a/unitgrade2/__pycache__/version.cpython-39.pyc and /dev/null differ
diff --git a/unitgrade2/unitgrade/ListQuestion.pkl b/unitgrade2/unitgrade/ListQuestion.pkl
deleted file mode 100644
index b201d4b453d94b66ed44ac821d72534a90b09811..0000000000000000000000000000000000000000
Binary files a/unitgrade2/unitgrade/ListQuestion.pkl and /dev/null differ
diff --git a/unitgrade2/unitgrade2.py b/unitgrade2/unitgrade2.py
deleted file mode 100644
index c094340817a5b2014b4821b5b306dad9ebfc69ca..0000000000000000000000000000000000000000
--- a/unitgrade2/unitgrade2.py
+++ /dev/null
@@ -1,891 +0,0 @@
-"""
-git add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade
-
-"""
-# from . import cache_read
-import unittest
-import numpy as np
-import sys
-from io import StringIO
-import collections
-import re
-import threading
-import tqdm
-import time
-import pickle
-import itertools
-import os
-
-myround = lambda x: np.round(x)  # required.
-msum = lambda x: sum(x)
-mfloor = lambda x: np.floor(x)
-
-def setup_dir_by_class(C,base_dir):
-    name = C.__class__.__name__
-    # base_dir = os.path.join(base_dir, name)
-    # if not os.path.isdir(base_dir):
-    #     os.makedirs(base_dir)
-    return base_dir, name
-
-class Hidden:
-    def hide(self):
-        return True
-
-class Logger(object):
-    def __init__(self, buffer):
-        self.terminal = sys.stdout
-        self.log = buffer
-
-    def write(self, message):
-        self.terminal.write(message)
-        self.log.write(message)
-
-    def flush(self):
-        # this flush method is needed for python 3 compatibility.
-        pass
-
-class Capturing(list):
-    def __init__(self, *args, stdout=None, unmute=False, **kwargs):
-        self._stdout = stdout
-        self.unmute = unmute
-        super().__init__(*args, **kwargs)
-
-    def __enter__(self, capture_errors=True): # don't put arguments here.
-        self._stdout = sys.stdout if self._stdout == None else self._stdout
-        self._stringio = StringIO()
-        if self.unmute:
-            sys.stdout = Logger(self._stringio)
-        else:
-            sys.stdout = self._stringio
-
-        if capture_errors:
-            self._sterr = sys.stderr
-            sys.sterr = StringIO() # memory hole it
-        self.capture_errors = capture_errors
-        return self
-
-    def __exit__(self, *args):
-        self.extend(self._stringio.getvalue().splitlines())
-        del self._stringio    # free up some memory
-        sys.stdout = self._stdout
-        if self.capture_errors:
-            sys.sterr = self._sterr
-
-class Capturing2(Capturing):
-    def __exit__(self, *args):
-        lines = self._stringio.getvalue().splitlines()
-        txt = "\n".join(lines)
-        numbers = extract_numbers(txt)
-        self.extend(lines)
-        del self._stringio    # free up some memory
-        sys.stdout = self._stdout
-        if self.capture_errors:
-            sys.sterr = self._sterr
-
-        self.output = txt
-        self.numbers = numbers
-
-
-class QItem(unittest.TestCase):
-    title = None
-    testfun = None
-    tol = 0
-    estimated_time = 0.42
-    _precomputed_payload = None
-    _computed_answer = None # Internal helper to later get results.
-    weight = 1 # the weight of the question.
-
-    def __init__(self, question=None, *args, **kwargs):
-        if self.tol > 0 and self.testfun is None:
-            self.testfun = self.assertL2Relative
-        elif self.testfun is None:
-            self.testfun = self.assertEqual
-
-        self.name = self.__class__.__name__
-        # self._correct_answer_payload = correct_answer_payload
-        self.question = question
-
-        super().__init__(*args, **kwargs)
-        if self.title is None:
-            self.title = self.name
-
-    def _safe_get_title(self):
-        if self._precomputed_title is not None:
-            return self._precomputed_title
-        return self.title
-
-    def assertNorm(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )
-        nrm = np.sqrt(np.sum( diff ** 2))
-
-        self.error_computed = nrm
-
-        if nrm > tol:
-            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
-
-    def assertL2(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
-        self.error_computed = np.max(diff)
-
-        if np.max(diff) > tol:
-            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")
-
-    def assertL2Relative(self, computed, expected, tol=None):
-        if tol == None:
-            tol = self.tol
-        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )
-        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )
-        self.error_computed = np.max(np.abs(diff))
-        if np.sum(diff > tol) > 0:
-            print(f"Not equal within tolerance {tol}")
-            print(f"Element-wise differences {diff.tolist()}")
-            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")
-
-    def precomputed_payload(self):
-        return self._precomputed_payload
-
-    def precompute_payload(self):
-        # Pre-compute resources to include in tests (useful for getting around rng).
-        pass
-
-    def compute_answer(self, unmute=False):
-        raise NotImplementedError("test code here")
-
-    def test(self, computed, expected):
-        self.testfun(computed, expected)
-
-    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):
-        possible = 1
-        computed = None
-        def show_computed_(computed):
-            print(">>> Your output:")
-            print(computed)
-
-        def show_expected_(expected):
-            print(">>> Expected output (note: may have been processed; read text script):")
-            print(expected)
-
-        correct = self._correct_answer_payload
-        try:
-            if unmute: # Required to not mix together print stuff.
-                print("")
-            computed = self.compute_answer(unmute=unmute)
-        except Exception as e:
-            if not passall:
-                if not silent:
-                    print("\n=================================================================================")
-                    print(f"When trying to run test class '{self.name}' your code threw an error:", e)
-                    show_expected_(correct)
-                    import traceback
-                    print(traceback.format_exc())
-                    print("=================================================================================")
-                return (0, possible)
-
-        if self._computed_answer is None:
-            self._computed_answer = computed
-
-        if show_expected or show_computed:
-            print("\n")
-        if show_expected:
-            show_expected_(correct)
-        if show_computed:
-            show_computed_(computed)
-        try:
-            if not passall:
-                self.test(computed=computed, expected=correct)
-        except Exception as e:
-            if not silent:
-                print("\n=================================================================================")
-                print(f"Test output from test class '{self.name}' does not match expected result. Test error:")
-                print(e)
-                show_computed_(computed)
-                show_expected_(correct)
-            return (0, possible)
-        return (1, possible)
-
-    def score(self):
-        try:
-            self.test()
-        except Exception:
-            return 0
-        return 1
-
-class QPrintItem(QItem):
-    def compute_answer_print(self):
-        """
-        Generate the output which is to be tested. By default, both text written to the terminal using print(...) and any return values
-        are sent to process_output (see compute_answer below). In other words, the values generated are:
-
-        res = compute_answer_print()
-        txt = (any terminal output generated above)
-        numbers = (any numbers found in the terminal output txt)
-
-        self.test(process_output(res, txt, numbers), <expected result>)
-
-        :return: Optional values for comparison
-        """
-        raise Exception("Generate output here. The output is passed to self.process_output")
-
-    def process_output(self, res, txt, numbers):
-        return res
-
-    def compute_answer(self, unmute=False):
-        with Capturing(unmute=unmute) as output:
-            res = self.compute_answer_print()
-        s = "\n".join(output)
-        s = rm_progress_bar(s) # Remove progress bar.
-        numbers = extract_numbers(s)
-        self._computed_answer = (res, s, numbers)
-        return self.process_output(res, s, numbers)
-
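-# Illustrative sketch of a QPrintItem subclass (the class and names below are
-# hypothetical, not part of the framework):
-#
-#   class AddItem(QPrintItem):
-#       def compute_answer_print(self):
-#           print("result:", 2 + 2)          # terminal output is captured
-#
-#       def process_output(self, res, txt, numbers):
-#           return numbers[-1]               # compare only the last printed number
-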
-class OrderedClassMembers(type):
-    # Metaclass which records the definition order of the class members in __ordered__.
-    @classmethod
-    def __prepare__(mcs, name, bases):
-        return collections.OrderedDict()
-
-    def __new__(mcs, name, bases, classdict):
-        ks = list(classdict.keys())
-        for b in bases:
-            ks += getattr(b, '__ordered__', [])  # Bases such as object may not define __ordered__.
-        classdict['__ordered__'] = [key for key in ks if key not in ('__module__', '__qualname__')]
-        return type.__new__(mcs, name, bases, classdict)
-
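-# Minimal sketch of what the metaclass records (Demo is a hypothetical class):
-#
-#   class Demo(metaclass=OrderedClassMembers):
-#       def second(self): pass
-#       def first(self): pass
-#
-#   Demo.__ordered__   # ['second', 'first'] -- definition order, not alphabetical
-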
-class QuestionGroup(metaclass=OrderedClassMembers):
-    title = "Untitled question"
-    partially_scored = False
-    t_init = 0  # Time spent on initialization (placeholder; set this externally).
-    estimated_time = 0.42
-    has_called_init_ = False
-    _name = None
-    _items = None
-
-    @property
-    def items(self):
-        if self._items is None:
-            self._items = []
-            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]]
-                       if inspect.isclass(gt) and issubclass(gt, QItem)]
-            for I in members:
-                self._items.append(I(question=self))
-        return self._items
-
-    @items.setter
-    def items(self, value):
-        self._items = value
-
-    @property
-    def name(self):
-        if self._name is None:
-            self._name = self.__class__.__name__
-        return self._name
-
-    @name.setter
-    def name(self, val):
-        self._name = val
-
-    def init(self):
-        # Can be used to set resources relevant for this question instance.
-        pass
-
-    def init_all_item_questions(self):
-        for item in self.items:
-            if not item.question.has_called_init_:
-                item.question.init()
-                item.question.has_called_init_ = True
-
-
-class Report():
-    title = "report title"
-    version = None
-    questions = []
-    pack_imports = []
-    individual_imports = []
-    nL = 80 # Maximum line width
-
-    @classmethod
-    def reset(cls):
-        for (q,_) in cls.questions:
-            if hasattr(q, 'reset'):
-                q.reset()
-
-    @classmethod
-    def mfile(cls):
-        return inspect.getfile(cls)
-
-    def _file(self):
-        return inspect.getfile(type(self))
-
-    def _import_base_relative(self):
-        root_dir = self.pack_imports[0].__path__
-        root_dir = root_dir._path[0] if hasattr(root_dir, '_path') else root_dir[0]  # Namespace vs. regular package.
-        root_dir = os.path.dirname(root_dir)
-        relative_path = os.path.relpath(self._file(), root_dir)
-        modules = os.path.normpath(relative_path[:-3]).split(os.sep)  # Strip the .py extension.
-        return root_dir, relative_path, modules
-
-    def __init__(self, strict=False, payload=None):
-        working_directory = os.path.abspath(os.path.dirname(self._file()))
-
-        self.wdir, self.name = setup_dir_by_class(self, working_directory)
-        for (q, _) in self.questions:
-            q.nL = self.nL  # Set maximum line length.
-
-        if payload is not None:
-            self.set_payload(payload, strict=strict)
-
-    def main(self, verbosity=1):
-        # Run all tests using standard unittest (nothing fancy).
-        import unittest
-        loader = unittest.TestLoader()
-        for q,_ in self.questions:
-            import time
-            start = time.time()  # Timing the whole run is a reasonable proxy for setup time.
-            suite = loader.loadTestsFromTestCase(q)
-            unittest.TextTestRunner(verbosity=verbosity).run(suite)
-            total = time.time() - start
-            q.time = total
-
-    def _setup_answers(self):
-        self.main()  # Run all tests in class just to get that out of the way...
-        report_cache = {}
-        for q, _ in self.questions:
-            if hasattr(q, '_save_cache'):
-                q()._save_cache()
-                q._cache['time'] = q.time
-                report_cache[q.__qualname__] = q._cache
-            else:
-                report_cache[q.__qualname__] = {'no cache; see _setup_answers in unitgrade2.py': True}
-        return report_cache
-
-    def set_payload(self, payloads, strict=False):
-        for q, _ in self.questions:
-            q._cache = payloads[q.__qualname__]
-
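-# Illustrative sketch of how a Report is declared (the module and class names
-# below are hypothetical):
-#
-#   import week1_tests
-#   class Report1(Report):
-#       title = "Week 1 report"
-#       questions = [(Week1, 10)]        # (UTestCase subclass, weight)
-#       pack_imports = [week1_tests]     # Package whose source is bundled with the report.
-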
-def rm_progress_bar(txt):
-    # Robust detection: the length of the bar can depend on various factors, so check for the order of symbols instead.
-    nlines = []
-    for line in txt.splitlines():
-        pct = line.find("%")
-        ql = False
-        if pct > 0:
-            i = line.find("|", pct + 1)
-            if i > 0 and line.find("|", i + 1) > 0:
-                ql = True
-        if not ql:
-            nlines.append(line)
-    return "\n".join(nlines)
-
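-# Sketch of the intended behaviour: a tqdm-style line such as
-#   " 43%|####      | [00:01<00:02]"
-# contains '%' followed by two '|' characters and is dropped; all other lines
-# are kept unchanged.
-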
-def extract_numbers(txt):
-    numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
-    rx = re.compile(numeric_const_pattern, re.VERBOSE)
-    nums = rx.findall(txt)
-    nums = [float(a) if ('.' in a or 'e' in a.lower()) else int(a) for a in nums]
-    if len(nums) > 500:
-        print(txt)
-        raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(nums))
-    return nums
-
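-# Sketch of the expected behaviour:
-#   extract_numbers("loss = 0.25 after 10 iterations")   ->  [0.25, 10]
-# Values containing '.' or an exponent become floats; the rest stay ints.
-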
-class ActiveProgress():
-    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True):
-        self.t = t
-        self._running = False
-        self.title = title
-        self.dt = 0.1
-        self.n = int(np.round(self.t / self.dt))
-        self.show_progress_bar = show_progress_bar
-
-        if start:
-            self.start()
-
-    def start(self):
-        self._running = True
-        if self.show_progress_bar:
-            self.thread = threading.Thread(target=self.run)
-            self.thread.start()
-        self.time_started = time.time()
-
-    def terminate(self):
-        if not self._running:
-            raise Exception("Stopping a progress bar which is not running.")
-        self._running = False
-        if self.show_progress_bar:
-            self.thread.join()
-        if hasattr(self, 'pbar') and self.pbar is not None:
-            self.pbar.update(1)
-            self.pbar.close()
-            self.pbar = None
-
-        sys.stdout.flush()
-        return time.time() - self.time_started
-
-    def run(self):
-        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,
-                              bar_format='{l_bar}{bar}| [{elapsed}<{remaining}]')
-
-        for _ in range(self.n-1): # Don't terminate completely; leave bar at 99% done until terminate.
-            if not self._running:
-                self.pbar.close()
-                self.pbar = None
-                break
-
-            time.sleep(self.dt)
-            self.pbar.update(1)
-
-
-
-from unittest.suite import _isnotsuite
-
-def instance_call_stack(instance):
-    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))
-    return s
-
-def get_class_that_defined_method(meth):
-    # Note: meth.im_class was Python 2; in Python 3 the instance is available as meth.__self__.
-    if inspect.ismethod(meth):
-        for cls in inspect.getmro(meth.__self__.__class__):
-            if meth.__name__ in cls.__dict__:
-                return cls
-    return None
-
-def caller_name(skip=2):
-    """Get a name of a caller in the format module.class.method
-
-       `skip` specifies how many levels of stack to skip while getting caller
-       name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
-
-       An empty string is returned if skipped levels exceed stack height
-    """
-    stack = inspect.stack()
-    start = 0 + skip
-    if len(stack) < start + 1:
-        return ''
-    parentframe = stack[start][0]
-
-    name = []
-    module = inspect.getmodule(parentframe)
-    # `modname` can be None when frame is executed directly in console
-    # TODO(techtonik): consider using __main__
-    if module:
-        name.append(module.__name__)
-    # detect classname
-    if 'self' in parentframe.f_locals:
-        # I don't know any way to detect call from the object method
-        # XXX: there seems to be no way to detect static method call - it will
-        #      be just a function call
-        name.append(parentframe.f_locals['self'].__class__.__name__)
-    codename = parentframe.f_code.co_name
-    if codename != '<module>':  # top level usually
-        name.append( codename ) # function or a method
-
-    ## Avoid circular refs and frame leaks
-    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack
-    del parentframe, stack
-
-    return ".".join(name)
-
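-# Sketch: called from inside a method, caller_name() returns a dotted path such
-# as "mymodule.MyClass.my_method"; the module and class parts are included only
-# when they can be recovered from the calling frame.
-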
-def get_class_from_frame(fr):
-    args, _, _, value_dict = inspect.getargvalues(fr)
-    # We check whether the first parameter of the frame's function is named 'self'.
-    if len(args) and args[0] == 'self':
-        # In that case, 'self' will be referenced in value_dict.
-        instance = value_dict.get('self', None)
-        if instance:
-            # Return its class.
-            return getattr(instance, '__class__', None)
-    # Return None otherwise.
-    return None
-
-from typing import Any
-import inspect, gc
-
-def giveupthefunc():
-    # Recover the function object whose code object matches the current frame by
-    # scanning the garbage collector's referrers.
-    frame = inspect.currentframe()
-    code  = frame.f_code
-    globs = frame.f_globals
-    functype = type(lambda: 0)
-    funcs = []
-    for func in gc.get_referrers(code):
-        if type(func) is functype:
-            if getattr(func, "__code__", None) is code:
-                if getattr(func, "__globals__", None) is globs:
-                    funcs.append(func)
-                    if len(funcs) > 1:
-                        return None
-    return funcs[0] if funcs else None
-
-
-from collections import defaultdict
-
-class UTextResult(unittest.TextTestResult):
-    nL = 80
-    number = -1  # Hacky way to set the question number.
-    show_progress_bar = True
-    def __init__(self, stream, descriptions, verbosity):
-        super().__init__(stream, descriptions, verbosity)
-        self.successes = []
-
-    def printErrors(self) -> None:
-        self.printErrorList('ERROR', self.errors)
-        self.printErrorList('FAIL', self.failures)
-
-    def addError(self, test, err):
-        super(unittest.TextTestResult, self).addError(test, err)
-        self.cc_terminate(success=False)
-
-    def addFailure(self, test, err):
-        super(unittest.TextTestResult, self).addFailure(test, err)
-        self.cc_terminate(success=False)
-
-    def addSuccess(self, test: unittest.case.TestCase) -> None:
-        self.successes.append(test)
-        self.cc_terminate()
-
-    def cc_terminate(self, success=True):
-        tsecs = np.round(self.cc.terminate(), 2)
-        sys.stdout.flush()
-        ss = self.item_title_print
-        print(self.item_title_print + ('.' * max(0, self.nL - 4 - len(ss))), end="")
-        ss = "PASS" if success else "FAILED"
-        if tsecs >= 0.1:
-            ss += " (" + str(tsecs) + " seconds)"
-        print(ss)
-
-
-    def startTest(self, test):
-        j = self.testsRun
-        self.testsRun += 1
-        n = UTextResult.number
-
-        item_title = test.shortDescription()  # Better for printing (get from cache).
-        if item_title is None:
-            # shortDescription() may return None under the plain unittest framework.
-            item_title = self.getDescription(test)
-        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)
-        estimated_time = 10
-        # ActiveProgress itself handles the show_progress_bar=False case.
-        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)
-        self._test = test
-
-    def _setupStdout(self):
-        if self._previousTestClass is None:
-            total_estimated_time = 1
-            if hasattr(self.__class__, 'q_title_print'):
-                q_title_print = self.__class__.q_title_print
-            else:
-                q_title_print = "<unnamed test. See unitgrade.py>"
-
-            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)
-            self.cc = cc
-
-    def _restoreStdout(self):  # Used when setting up the test.
-        if self._previousTestClass is None:
-            q_time = self.cc.terminate()
-            q_time = np.round(q_time, 2)
-            sys.stdout.flush()
-            print(self.cc.title, end="")
-            nL = 80
-            print(" " * max(0, nL - len(self.cc.title)) + (
-                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))
-
-from unittest.runner import _WritelnDecorator
-from io import StringIO
-
-class UTextTestRunner(unittest.TextTestRunner):
-    def __init__(self, *args, **kwargs):
-        stream = StringIO()  # Discard the default stream; _makeResult writes to stdout instead.
-        super().__init__(*args, stream=stream, **kwargs)
-
-    def _makeResult(self):
-        stream = _WritelnDecorator(sys.stdout)
-        return self.resultclass(stream, self.descriptions, self.verbosity)
-
-def wrapper(foo):
-    def magic(self):
-        foo(self)
-    magic.__doc__ = foo.__doc__
-    return magic
-
-from functools import update_wrapper, _make_key
-from collections import namedtuple
-_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
-
-def cache(foo, typed=False):
-    """ Magic cache wrapper; the key construction mirrors functools.lru_cache:
-    https://github.com/python/cpython/blob/main/Lib/functools.py
-    """
-    def wrapper(self, *args, **kwargs):
-        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))
-        if not self._cache_contains(key):
-            value = foo(self, *args, **kwargs)
-            self._cache_put(key, value)
-        else:
-            value = self._cache_get(key)
-        return value
-    return update_wrapper(wrapper, foo)
-
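-# Illustrative sketch of @cache on a test class (Week1 and its methods are
-# hypothetical): the value is computed once when the instructor builds the
-# cache and replayed from the cache afterwards.
-#
-#   class Week1(UTestCase):
-#       @cache
-#       def squares(self, n):
-#           return [i ** 2 for i in range(n)]   # computed at most once per key
-#
-#       def test_squares(self):
-#           self.assertEqualC(self.squares(10))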
-
-class UTestCase(unittest.TestCase):
-    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.
-    _cache = None  # Read-only cache. Ensures method always produce same result.
-    _cache2 = None  # User-written cache.
-
-    def capture(self):
-        return Capturing2(stdout=self._stdout)
-
-    @classmethod
-    def question_title(cls):
-        """ Return the question title """
-        return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__
-
-    @classmethod
-    def reset(cls):
-        print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")
-        cls._outcome = None
-        cls._cache = None
-        cls._cache2 = None
-
-    def _callSetUp(self):
-        # Redirect stdout so output printed during the test can be captured.
-        self._stdout = sys.stdout
-        import io
-        sys.stdout = io.StringIO()
-        super().setUp()
-
-    def _callTearDown(self):
-        sys.stdout = self._stdout
-        super().tearDown()
-
-    def shortDescriptionStandard(self):
-        sd = super().shortDescription()
-        if sd is None:
-            sd = self._testMethodName
-        return sd
-
-    def shortDescription(self):
-        sd = self.shortDescriptionStandard()
-        title = self._cache_get((self.cache_id(), 'title'), sd)
-        return title if title is not None else sd
-
-    @property
-    def title(self):
-        return self.shortDescription()
-
-    @title.setter
-    def title(self, value):
-        self._cache_put((self.cache_id(), 'title'), value)
-
-    def _get_outcome(self):
-        if not hasattr(self.__class__, '_outcome') or self.__class__._outcome is None:
-            self.__class__._outcome = {}
-        return self.__class__._outcome
-
-    def _callTestMethod(self, testMethod):
-        t = time.time()
-        self._ensure_cache_exists()  # Make sure the cache is there.
-        if self._testMethodDoc is not None:
-            # Ensure the cache is eventually updated with the right docstring.
-            self._cache_put((self.cache_id(), 'title'), self.shortDescriptionStandard())
-        # Reset the temporary assertion cache (used together with the @cache decorator).
-        self._cache2[(self.cache_id(), 'assert')] = {}
-
-        res = testMethod()
-        elapsed = time.time() - t
-        self._get_outcome()[self.cache_id()] = res
-        self._cache_put((self.cache_id(), "time"), elapsed)
-
-    def cache_id(self):
-        # The (class, method) pair identifying this test in the cache.
-        c = self.__class__.__qualname__
-        m = self._testMethodName
-        return (c, m)
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self._load_cache()
-        self._assert_cache_index = 0
-
-    def _ensure_cache_exists(self):
-        if not hasattr(self.__class__, '_cache') or self.__class__._cache is None:
-            self.__class__._cache = dict()
-        if not hasattr(self.__class__, '_cache2') or self.__class__._cache2 is None:
-            self.__class__._cache2 = dict()
-
-    def _cache_get(self, key, default=None):
-        self._ensure_cache_exists()
-        return self.__class__._cache.get(key, default)
-
-    def _cache_put(self, key, value):
-        self._ensure_cache_exists()
-        self.__class__._cache2[key] = value
-
-    def _cache_contains(self, key):
-        self._ensure_cache_exists()
-        return key in self.__class__._cache
-
-    def wrap_assert(self, assert_fun, first, *args, **kwargs):
-        key = (self.cache_id(), 'assert')
-        if not self._cache_contains(key):
-            print("Warning, framework missing", key)
-        cache = self._cache_get(key, {})
-        idx = self._assert_cache_index
-        if idx not in cache:
-            print("Warning, framework missing cache index", key, "id =", idx)
-        _expected = cache.get(idx, first)
-        assert_fun(first, _expected, *args, **kwargs)
-        cache[idx] = first
-        self._cache_put(key, cache)
-        self._assert_cache_index += 1
-
-    def assertEqualC(self, first: Any, msg: Any = None) -> None:
-        self.wrap_assert(self.assertEqual, first, msg)
-
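-    # Sketch of the intended usage (my_add is a hypothetical student function):
-    # the expected value is never written in the test; it is recorded when the
-    # instructor runs the working solution and replayed on the student's machine.
-    #
-    #   class Week1(UTestCase):
-    #       def test_add(self):
-    #           self.assertEqualC(my_add(2, 2))   # expected value comes from the cache
-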
-    def _cache_file(self):
-        return os.path.join(os.path.dirname(inspect.getfile(self.__class__)), "unitgrade", self.__class__.__name__ + ".pkl")
-
-    def _save_cache(self):
-        # Get the cache file name (i.e., where to save to) and ensure the directory exists.
-        cfile = self._cache_file()
-        os.makedirs(os.path.dirname(cfile), exist_ok=True)
-
-        if hasattr(self.__class__, '_cache2'):
-            with open(cfile, 'wb') as f:
-                pickle.dump(self.__class__._cache2, f)
-
-    # But the cache can also be set explicitly.
-    def _load_cache(self):
-        if self._cache is not None:  # Cache already loaded. We will not load it twice.
-            return
-        cfile = self._cache_file()
-        if os.path.exists(cfile):
-            with open(cfile, 'rb') as f:
-                data = pickle.load(f)
-                self.__class__._cache = data
-        else:
-            print("Warning! Data file not found:", cfile)
-
-def hide(func):
-    return func
-
-def makeRegisteringDecorator(foreignDecorator):
-    """
-        Returns a copy of foreignDecorator, which is identical in every
-        way(*), except also appends a .decorator property to the callable it
-        spits out.
-    """
-    def newDecorator(func):
-        # Call to newDecorator(method)
-        # Exactly like old decorator, but output keeps track of what decorated it
-        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done
-        R.decorator = newDecorator  # keep track of decorator
-        # R.original = func         # might as well keep track of everything!
-        return R
-
-    newDecorator.__name__ = foreignDecorator.__name__
-    newDecorator.__doc__ = foreignDecorator.__doc__
-    # (*) We can be somewhat "hygienic", but newDecorator still isn't signature-preserving,
-    # i.e. you will not be able to get a runtime list of parameters. For that you would need
-    # hackish libraries... but in this case the only argument is func, so it's not a big issue.
-    return newDecorator
-
-hide = makeRegisteringDecorator(hide)
-
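-# Sketch of the intended usage: since `hide` is now a registering decorator,
-# hidden test methods can be found again via methodsWithDecorator below
-# (Week1/test_secret/my_add are hypothetical names):
-#
-#   class Week1(UTestCase):
-#       @hide
-#       def test_secret(self):
-#           self.assertEqualC(my_add(2, 2))
-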
-def methodsWithDecorator(cls, decorator):
-    """
-        Returns all methods in CLS with DECORATOR as the
-        outermost decorator.
-
-        DECORATOR must be a "registering decorator"; one
-        can make any decorator "registering" via the
-        makeRegisteringDecorator function.
-
-        import inspect
-        ls = list(methodsWithDecorator(GeneratorQuestion, deco))
-        for f in ls:
-            print(inspect.getsourcelines(f) ) # How to get all hidden questions.
-    """
-    for maybeDecorated in cls.__dict__.values():
-        if hasattr(maybeDecorated, 'decorator'):
-            if maybeDecorated.decorator == decorator:
-                yield maybeDecorated
-
diff --git a/unitgrade2/version.py b/unitgrade2/version.py
deleted file mode 100644
index acb984f2d0682c8a7cfc890ce91bbbc229d98abe..0000000000000000000000000000000000000000
--- a/unitgrade2/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.9.0"
\ No newline at end of file