From 240dc8e51eefe80d351e123e04f0f43b68ac259f Mon Sep 17 00:00:00 2001
From: Tue Herlau <tuhe@dtu.dk>
Date: Thu, 26 Aug 2021 11:00:25 +0200
Subject: [PATCH] Reorg for new version

---
 .../__pycache__/homework1.cpython-38.pyc      | Bin 962 -> 833 bytes
 .../cs103/__pycache__/report3.cpython-38.pyc  | Bin 1152 -> 1102 bytes
 .../report3_complete.cpython-38.pyc           | Bin 1353 -> 1295 bytes
 .../instructor/cs103/report3.py               |   2 +-
 .../instructor/cs103/report3_complete.py      |   4 ++--
 .../cs103/report3_complete_grade.py           |   4 ++--
 .../instructor/cs103/report3_grade.py         |   4 ++--
 ..._20.token => Report3_handin_0_of_20.token} | Bin 70338 -> 70223 bytes
 .../__pycache__/homework1.cpython-38.pyc      | Bin 1080 -> 992 bytes
 .../cs103/__pycache__/report3.cpython-38.pyc  | Bin 1152 -> 1102 bytes
 .../report3_complete.cpython-38.pyc           | Bin 1353 -> 1295 bytes
 .../__pycache__/report3_grade.cpython-38.pyc  | Bin 57970 -> 57948 bytes
 .../students/cs103/homework1.py               |  21 ++++++++++--------
 .../example_docker/students/cs103/report3.py  |   2 +-
 .../students/cs103/report3_grade.py           |   4 ++--
 15 files changed, 22 insertions(+), 19 deletions(-)
 rename examples/example_docker/students/cs103/{Report3_handin_20_of_20.token => Report3_handin_0_of_20.token} (97%)

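Context for the rename above: the score is encoded directly in the token filename by gather_upload_to_campusnet in the embedded report1_source later in this patch, via the format string "%s_%i_of_%i%s.token". A minimal sketch of that naming scheme; the concrete values are taken from this rename, and vstring is empty because Report3 sets no version:

    # Token filename scheme from gather_upload_to_campusnet (see report1_source below).
    payload_out_base = "Report3_handin"   # "<report class name>_handin"
    obtain, possible = 0, 20              # points obtained / points possible
    vstring = ""                          # becomes "_v<version>" when Report.version is set
    token = "%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring)
    assert token == "Report3_handin_0_of_20.token"
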
diff --git a/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc
index 22c81a34ff5073fcde109ee78fdf683b3e67801a..301c79fcc3244747ff167131505a756d0f54b54e 100644
GIT binary patch
delta 616
zcmX@aevpkXl$V!_fq{V`BwRhwk7**GOuZWe149Z!3S$dH6hjJA3UdoX6k{q=3QIOq
zky9#j3TrB33R?<WGt+E_xlGNBj0~yFDeS2XDQqbmDV!->=`78R%}k68Dcr#fnmoyj
zAhV$u#Aaq-U~pz&U?}EcU|=X=C}FH&Xl86?3}(<|tXjmSprD{o@19wfnx~Mckds+l
zqM)8z38K{%ic(8Ti}H$L;>r1W#hJw=nR)37`DqFz8L0}ziMgo?sX3{+sd**E3YmEd
zMX6<}Ma8KK`9&$IMS2RZdg*##3!vihAagaM4RwrkjAJzwiZk*{b5azFQtd$YC`23U
z80i?sDrh9atki_))?~WH0@8Vl18Po+CSw#E#FZ-<ikK%pEaC-uw3rFxYt|yx$uUft
z^_q-TNeU2;m6Ycz<dx<or4|({BqSy%B<7_kBqSv$lw@QUE2Nd?C6{F8=fRv)oS9pY
zQwjAv$eWo(3dN<lR$!0VDI}((Xe8<+X)4$%Bx)xq6oEollj#;?;w{D`Fb6`gh%zuR
z++t2lN#OtmJ=kT;MI4jGnbnwHGEKH;mX3J|()yB>fq~&AI|BoQpC(HYh*Jd8aEl{8
wJ})shH9r0pYe7+FUJ1lhkPZlo1!O#jO>TZlX-=vg*!LiP9PAvd984mN0M?wGC;$Ke

literal 962
zcmWIL<>g{vU|`@+QA^}uW?*;>;vi#o1_lNP1_p-WFa`#O6owSW7KSK>6s8pB7KSLs
z6qXd$7KSLM6t)!h7KSM1RF)KuY^I{1RMr&ERHhWJ6s~5b*$i`;ni&}xQdv{DQyEgY
zQg~8$Q~1(Zni-p!7#ULdgBdghk{OY#WM*Js0AXj4tuhP@4CxHDj5Q1{46%H*Of^jD
zj5Unn47H4f95sx=3@aJ^Rx)TZN3rEr=42L^++r(AEiTO|xy6`W#KgeB049FLJ6pws
z7N-^!$CQ+2q{g`9Czs}?=9Lu3l;&lYq!%Toq{bH%WtJtDq{gIHB<2?6q!z;%@hSPq
z*{MY_nR&$}MWx9l`9(3w#fApPF&X)}spa`a*@k)rmA80`Qp-|{ic{l3ZeRtun*-z_
zCKg5xMjoajHU<WUWRL(1gS-n81bb17fq|ifA%zhXNT7fP1!_7=El7;5h9QN$gt3O9
znX#FLks*a4m_d`nuSzE*BUJ%vr2@!Gg|yPV<dV$%JcZ=MJh)6ESnegrvX`L1cnJ#C
zmn;kn3{{*ynZ+dv<%z{sRRU0hQWQX5$jr~vWV*##P?VWh0(Q41<1NM<a4<p$kne7Z
z6la1$3GChY!qU`Yklnl>e}jC*0P;0s5s04*vKQuS5F3QSz6J$Q4Ff1DgBglIP6QjT
z$#jb`@fKqem;)ie7BVNMqzHk`2kB>EED`{PC`cE~Y!DkH2RFNf0TgObvo#rSF&CHS
zYBJtp$|;5z3sPJJHk&QCGQPMpR}{&3VFm_<TO9H6xrv#1@$psSk@=-X3Xqhgke{Yt
zq@b-}q+qL11oA|cSP(e!GxO3F5*3Pb6LWIF;a<eaz`*blWW!5P9`e&<1BWL#SitTt
wg4vRnn420OU&IYdujmfsu*uC&Da}c>0~uD#!@$76!NI}G!OX$T!6d>6077)lMF0Q*

diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc
index 29b97cd4ae665c681a0729f0b00a201c6324a7ea..c296ad40721b7cfc0c42277ed6eca9f2b2158a2c 100644
GIT binary patch
delta 289
zcmZqRJjcNs%FD~ez`($u6rrBjw2{|?(U^&Wfq|8Qfx(%9fuY!dfq@}~F@-6IA(ye1
zk&&T>VF6POV+wN$b2HOIIDhg4MsXj;Tg-_mDKA0VGw1yO|NkY(5KZP={K>_J2F7|B
z`MIg(`9;}=MH~za47Yd^i;Gi>N?Z#|6LXw3nQpNZr{<)ArB*T&@lBRxDre-GJeA2s
zRDgkjp@<Ka;Gg`Csg_$1Bq#(TgeR9V8)%{{0?CLnFfj0gJi^Anz`()C!pOx~Bs%##
TbB??uOu7hc4!Y*aQ7mcz{1G|S

delta 341
zcmX@d(ZI<Y%FD~ez`(%J7OI{&V<WEzV<rm&0|P4q1A{XI14FR|0|P?}V+vCaLoQ=2
zV=hxIQ!aBYGb2L{!vdBX<`m`>=4PgajBq|n3M+)4!Vt`$$>vwY$iTp$$$X2aD77rL
zs5mt~C$qTZ7F%v*d~s>+<OPhPUaYtHlZy=vjP)||b5qOni?R)iI2afhia0?8$Yr;9
z5{rvdi%MJzOA~XPHCb-46sP8-p($F)P$V$<HDfuW$mC=u7f~UQW&u<}aPl6eT5e&G
zpa_T%o$SJFpoy*sBqPqiz`zgkJR1W80|z4uBNt<l_~g0FIr7pl=_0T>=$a>Mu&4n5
Dke5TQ

diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc
index b14cd695060a09e05254b731f1807e3ddf74cc65..6059825c3966108ca837436041e85c8db3a0d602 100644
GIT binary patch
delta 401
zcmX@f)z8Hj%FD~ez`(#D6QQ2?muVwkFk>PU0|Nsq0|SFI0|P^`0Rsa=3S$aW4nr<u
zEh8gC4Z{Ma8pagn6y|28g$O=N3M+)4!Vt`$$>vwY$iTp`lJOREVoJ(Okb#+V{{R2~
z5@hU3=E)Bj#pI)S5{rvdi%MJzOA~XPS2EpVDNfBvE8+y{V<_UEtjW~C$TN8rlR_j&
zJ4hqgJqiTe1Gb+9Vt)|_0|P@5$a_URAQmr(;D-|eleL-U8F?mqGkeMlf<)Osgb;{8
zC<3ztCU0hL<Q4|Wh=2&u$@VM;nqVmeApnvQXJB9esVfGV#lgtJ$i-MBK6wU9j=U61
Sx(Fm+gwO|O2~1XIRRaJ|L`PZx

delta 454
zcmeC@I?2Ts%FD~ez`($8CR8m^lX)XwFk?Ln0|Nsq0|SFI0|P^`1p@;^3S$aW4nr<u
zEn_ZIEmJOYEi)rS4Z{MK8s-${6y|28g^X}MOA0H5pTZE#pvmS}#K^$Fu#))}Pf==F
zYEf}&d`@O@$t||r%J|~a+*{0vDJd_R7#J8b=luWw|0T%Mm8_mo{K>_J2F7|B`MIg(
z`9;}=MVt%_3`HPk7I8B$Fhubr78j=$mADp`CgwP=WVyvsoSKt{rf4NYk>KR-j17z;
zlPj1Mv{C$|f!9xycQeUI@qldP0}=cnLJ&>}P5#Iv&nPlkkl9mS7$nLDB1Aw0LJ^oH
zG`W(wky{ibBL*VGC$q8`Xo95>gb+wZl7WE%WL7cAEDlB%MlQx8$;ojnIr1_v=_0T>
N2z_9d(BzjaY5;UnS*-v7

diff --git a/examples/example_docker/instructor/cs103/report3.py b/examples/example_docker/instructor/cs103/report3.py
index 0811042..8108883 100644
--- a/examples/example_docker/instructor/cs103/report3.py
+++ b/examples/example_docker/instructor/cs103/report3.py
@@ -6,7 +6,7 @@ import random
 class Week1(UTestCase):
     """ The first question for week 1. """
     def test_add(self):
-        from cs103.homework1 import reverse_list, my_sum, add
+        from cs103.homework1 import add
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
 
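For context, assertEqualC in the hunk above takes only the computed value: the expected value is read from a cache that the instructor's run of the complete report records and the student version replays. A minimal, self-contained sketch of the idea, modeled on UTestCase.assertEqualC from the embedded report1_source (CachedCase and the plain dict cache are illustrative stand-ins; the real class pickles its cache to disk and appends a per-call counter so one test can assert several values):

    import unittest

    class CachedCase(unittest.TestCase):
        _cache = {}  # stand-in for the pickled per-class cache in UTestCase

        def assertEqualC(self, first):
            # Key the expectation by (class, method), as UTestCase.cache_id() does.
            key = (self.__class__.__qualname__, self._testMethodName)
            if key not in self._cache:
                # Instructor run: nothing recorded yet, so record the computed value.
                self._cache[key] = first
            # Student run: compare the computed value against the recorded one.
            self.assertEqual(first, self._cache[key])
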
diff --git a/examples/example_docker/instructor/cs103/report3_complete.py b/examples/example_docker/instructor/cs103/report3_complete.py
index da2fe59..740b4f7 100644
--- a/examples/example_docker/instructor/cs103/report3_complete.py
+++ b/examples/example_docker/instructor/cs103/report3_complete.py
@@ -6,13 +6,13 @@ import random
 class Week1(UTestCase):
     """ The first question for week 1. """
     def test_add(self):
-        from cs103.homework1 import reverse_list, my_sum, add
+        from cs103.homework1 import add
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
 
     @hide
     def test_add_hidden(self):
-        from cs103.homework1 import reverse_list, my_sum, add
+        from cs103.homework1 import add
         self.assertEqualC(add(2,2))
 
 import cs103
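
The @hide decorator above marks instructor-only tests. In the embedded report1_source it is built with makeRegisteringDecorator, which leaves the decorated function's behavior unchanged but tags it with a .decorator attribute so hidden tests can be located (and stripped from the student version) afterwards. A minimal sketch of that pattern, following the embedded definitions:

    def hide(func):
        return func  # identity: hidden tests still run in the instructor's report

    def makeRegisteringDecorator(foreignDecorator):
        def newDecorator(func):
            R = foreignDecorator(func)   # behave exactly like the original decorator
            R.decorator = newDecorator   # ...but record what decorated the function
            return R
        return newDecorator

    hide = makeRegisteringDecorator(hide)

    @hide
    def test_add_hidden():
        pass

    assert test_add_hidden.decorator is hide  # discoverable via methodsWithDecorator
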
diff --git a/examples/example_docker/instructor/cs103/report3_complete_grade.py b/examples/example_docker/instructor/cs103/report3_complete_grade.py
index ebdfa03..34e5f3a 100644
--- a/examples/example_docker/instructor/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_complete_grade.py
@@ -428,8 +428,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6063000000000075732e'
+report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
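
The Week1 tests at the end of the string above rely on UTestCase.assertEqualC, defined earlier in the same embedded source. A condensed sketch of the idea, with simplifications: the real unique_cache_id also appends a counter so repeated calls within one test get distinct keys, and the cache is normally loaded from a unitgrade/*.pkl file next to the test:

    import unittest

    class CachedCase(unittest.TestCase):
        _cache = {}  # expected values, normally read from disk

        def assertEqualC(self, first, msg=None):
            key = (self.__class__.__qualname__, self._testMethodName)
            if key not in self._cache:
                print("Warning, framework missing key", key)
            # compare against the cached expected value; defaulting to
            # `first` means a missing key can never fail the assertion
            self.assertEqual(first, self._cache.get(key, first), msg)
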
+report1_payload = '8004951f000000000000007d948c055765656b31947d948c0474696d6594473f6067000000000073732e'
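
The gather_upload_to_campusnet function in the embedded source above derives the hand-in filename from the obtained/possible score. A worked example mirroring its format string, with illustrative values (0 of 20, no version set):

    payload_out_base = "Report3" + "_handin"
    obtain, possible = 0, 20                  # results['total'], illustrative
    version = None                            # Report.version
    vstring = "_v" + version if version is not None else ""
    token = "%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring)
    assert token == "Report3_handin_0_of_20.token"
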
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
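
Taken together, the module-level assignments above make the grade script self-contained: source_instantiate (itself defined inside report1_source) executes the embedded source and rebuilds the report object with its pickled answer cache. A minimal sketch of that flow, simplifying eval("exec") to a plain exec (the indirection in the original apparently exists only to survive minification/obfuscation):

    import pickle

    def source_instantiate(name, source, payload):
        exec(source, globals())                     # defines Week1, Report3, ...
        pl = pickle.loads(bytes.fromhex(payload))   # cached expected answers
        return eval(name)(payload=pl, strict=True)  # e.g. Report3(payload=pl, strict=True)

    # report = source_instantiate("Report3", report1_source, report1_payload)
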
diff --git a/examples/example_docker/instructor/cs103/report3_grade.py b/examples/example_docker/instructor/cs103/report3_grade.py
index 1d4118e..bd7418c 100644
--- a/examples/example_docker/instructor/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_grade.py
@@ -428,8 +428,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
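OrderedClassMembers above records the definition order of the class body in __ordered__ so question items can be run in source order. A self-contained sketch of the idea (base-class members, which the original also merges in, are omitted for brevity; on Python 3.7+ plain dicts already preserve insertion order, so OrderedDict is belt-and-braces here):

import collections

class Ordered(type):
    @classmethod
    def __prepare__(mcls, name, bases):
        # The mapping returned here becomes the class body's namespace.
        return collections.OrderedDict()

    def __new__(mcls, name, bases, classdict):
        classdict['__ordered__'] = [k for k in classdict
                                    if k not in ('__module__', '__qualname__')]
        return super().__new__(mcls, name, bases, classdict)

class Demo(metaclass=Ordered):
    def b(self): pass
    def a(self): pass

assert Demo.__ordered__ == ['b', 'a']   # definition order, not alphabetical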
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
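Report.main above is plain unittest machinery: load each question class with a TestLoader, run it with a TextTestRunner, and use the wall-clock time as a proxy for setup cost. A minimal sketch of that loop under the same assumptions:

import time, unittest

class Q1(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(1 + 1, 2)

loader = unittest.TestLoader()
start = time.time()
suite = loader.loadTestsFromTestCase(Q1)
unittest.TextTestRunner(verbosity=1).run(suite)
print("elapsed (used as the question's time estimate):", time.time() - start)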
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
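extract_numbers above pulls every numeric literal out of captured terminal text with a VERBOSE regex; matches containing '.' or 'e' become floats, the rest ints (an uppercase-only exponent such as 1E3 would fall through to int() and raise ValueError, a latent edge case). Usage sketch:

import re

numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
rx = re.compile(numeric_const_pattern, re.VERBOSE)
hits = rx.findall("loss=0.25 after 10 steps, lr 1e-3")
values = [float(a) if ('.' in a or 'e' in a) else int(a) for a in hits]
print(values)  # [0.25, 10, 0.001]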
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
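ActiveProgress above runs a tqdm bar on a worker thread, ticking every dt seconds until terminate() stops it and returns the elapsed wall-clock time. A compact sketch of the same pattern (the class name TickingBar is illustrative):

import sys, threading, time
import tqdm

class TickingBar:
    def __init__(self, t, title="progress", dt=0.1):
        self.dt, self.n = dt, max(1, int(round(t / dt)))
        self._running = True
        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, desc=title,
                              leave=False, ncols=100)
        self.t0 = time.time()
        self.thread = threading.Thread(target=self._run)
        self.thread.start()

    def _run(self):
        for _ in range(self.n - 1):   # stop just short of 100% until terminate()
            if not self._running:
                break
            time.sleep(self.dt)
            self.pbar.update(1)

    def terminate(self):
        self._running = False
        self.thread.join()
        self.pbar.close()
        return time.time() - self.t0

bar = TickingBar(t=0.5, title="demo")
time.sleep(0.3)
print("took", round(bar.terminate(), 2), "seconds")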
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
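The cache decorator above memoises a method into the per-class cache, keyed by cache_id() plus the call arguments (via functools._make_key, a private CPython helper). The record-then-replay effect in miniature, using a plain tuple key instead:

class CachedThing:
    _cache = {}                      # plays the role of the class-level cache

    def cache_id(self):
        return (self.__class__.__name__,)

    def expensive(self, x):
        key = self.cache_id() + ("cache", x)
        if key not in self._cache:   # first call: compute and record
            print("computing", x)
            self._cache[key] = x * x
        return self._cache[key]      # later calls: replay the recorded value

c = CachedThing()
print(c.expensive(3))   # prints "computing 3", then 9
print(c.expensive(3))   # cache hit: just 9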
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
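assertEqualC above is the heart of the record/replay grading scheme: on the instructor's machine _cache is empty, so the computed value is compared against itself (always passes) and written to _cache2 for shipping; in the student's grade script _cache arrives pre-filled, so the same call now checks the student's value against the recording. The idea in miniature (the names below are illustrative stand-ins):

recorded = {}                                      # stands in for _cache2 (written)
shipped = {("Week1", "test_add", 0): 4}            # stands in for _cache (read-only)

def assert_equal_c(key, first):
    expected = shipped.get(key, first)             # fall back to `first` if unrecorded
    assert first == expected, f"{first!r} != recorded {expected!r}"
    recorded[key] = first

assert_equal_c(("Week1", "test_add", 0), 2 + 2)    # passes: matches the recording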
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
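makeRegisteringDecorator above tags every function the wrapped decorator touches with a .decorator attribute, which is what lets methodsWithDecorator later enumerate e.g. all @hide-marked tests on a class. A runnable sketch of the same mechanism:

def hide(func):
    return func

def make_registering(dec):
    def new_dec(func):
        out = dec(func)
        out.decorator = new_dec      # remember who decorated this function
        return out
    new_dec.__name__ = dec.__name__
    return new_dec

hide = make_registering(hide)

class Questions:
    @hide
    def test_secret(self): pass
    def test_public(self): pass

hidden = [f for f in Questions.__dict__.values()
          if getattr(f, 'decorator', None) is hide]
print([f.__name__ for f in hidden])  # ['test_secret']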
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
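The -q flag parsed above accepts either a question number or question.item: a spec like "2.1" is split into two integers, while a bare "2" selects the whole question (qitem left as passed; None in this sketch). Worked example of that parsing step:

def parse_q(qspec):
    if "." in qspec:
        question, qitem = [int(v) for v in qspec.split(".")]
    else:
        question, qitem = int(qspec), None
    return question, qitem

print(parse_q("2.1"))  # (2, 1)
print(parse_q("2"))    # (2, None)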
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
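SequentialTestLoader above exists because unittest.TestLoader returns test names alphabetically, while unitgrade wants them in definition order; sorting by the class __dict__ index restores it (relying on dict insertion order, guaranteed on Python 3.7+). Demonstration:

import unittest

class T(unittest.TestCase):
    def test_b(self): pass
    def test_a(self): pass

class SequentialTestLoader(unittest.TestLoader):
    def getTestCaseNames(self, testCaseClass):
        names = super().getTestCaseNames(testCaseClass)
        order = list(testCaseClass.__dict__.keys())
        return sorted(names, key=order.index)

print(unittest.TestLoader().getTestCaseNames(T))   # ['test_a', 'test_b']
print(SequentialTestLoader().getTestCaseNames(T))  # ['test_b', 'test_a']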
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
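Worked example of the scoring arithmetic above: int() truncates, so a question is effectively all-or-nothing, and any failed test zeroes its weight. Note also that, as written, only res.errors is subtracted; plain assertion failures land in res.failures and would not reduce obtained here.

w, testsRun, errors = 20, 4, 1
possible = testsRun
obtained = possible - errors               # 3 of 4 tests passed
points = w * int(obtained * 1.0 / possible)
print(points)                              # 0, not 15: int(0.75) == 0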
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
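The packaging step in gather_imports above walks a package directory and writes every .py file into an in-memory zip whose raw bytes are embedded in the token; paths are made relative to the package's parent so the archive unpacks as package/... . Standalone sketch:

import io, os, zipfile

def zip_package_sources(top_package):
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        for root, dirs, files in os.walk(top_package):
            for file in files:
                if file.endswith('.py'):
                    fpath = os.path.join(root, file)
                    arcname = os.path.relpath(fpath, os.path.dirname(top_package))
                    zf.write(fpath, arcname)
    return buf.getvalue()

# e.g. zip_package_sources(os.path.dirname(cs103.__file__))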
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
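The token filename assembled above encodes the report class name, the obtained and possible points, and an optional version suffix, with the full results dict pickled inside. How the name comes together:

payload_out_base = "Report3" + "_handin"   # report.__class__.__name__ + "_handin"
obtain, possible, vstring = 0, 20, ""
token = "%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring)
print(token)  # Report3_handin_0_of_20.token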
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f504f000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8a9f000000000075732e'
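report1_payload is a hex-encoded pickle of the recorded cache (titles, times and expected answers for Week1); source_instantiate above turns it back into the dictionary the Report is seeded with. Decoding sketch, mirroring the bytes.fromhex/pickle.loads pair:

import pickle

def load_payload(payload_hex):
    return pickle.loads(bytes.fromhex(payload_hex))

# report = Report3(payload=load_payload(report1_payload), strict=True)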
+report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5069000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8eb8600000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
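For orientation, the generated grade script ends by reconstructing the report from the two embedded blobs above: report1_source is exec'ed to define the framework plus the Report3 class, and report1_payload is a hex-encoded pickle holding the precomputed answer cache. A minimal sketch of that flow (the helper name source_instantiate_sketch is illustrative; the arguments stand in for the blobs above):

import pickle

def source_instantiate_sketch(name, report_source, payload_hex):
    # Define the framework classes and the Report subclass (e.g. Report3)
    # by executing the embedded source in the module globals.
    exec(report_source, globals())
    # Decode the hex-encoded pickle; Report.set_payload() then assigns each
    # entry to the matching question class via q._cache = payloads[q.__qualname__].
    pl = pickle.loads(bytes.fromhex(payload_hex))
    # strict is simply forwarded to Report.set_payload().
    return eval(name)(payload=pl, strict=True)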
diff --git a/examples/example_docker/students/cs103/Report3_handin_20_of_20.token b/examples/example_docker/students/cs103/Report3_handin_0_of_20.token
similarity index 97%
rename from examples/example_docker/students/cs103/Report3_handin_20_of_20.token
rename to examples/example_docker/students/cs103/Report3_handin_0_of_20.token
index 65730c1a655642d613aca21c8e74d1bbe7fe2112..f702bb0c28a621263baf89d48d3ca583519a6a52 100644
GIT binary patch
delta 672
zcmX@Kl;!*qmI>mF3=<_x7#Sw|T1ayWGBN~sGqZ>=fB<KNR4~^O_5d?x1_rZ<3%Vvh
zWR$E|;N?<KP*6}(Qi3qtGs{x*6cQD3GK)(T)N?CAw7No3YDsBPUNKBOIX|yBv$!NP
zFI^!&O`#+sRiQXBH&r1uCp9-UucTNZGf$x?wJf!$I8`CPC?&N>Pr+3$T@PXbR3aW^
zuts#Sp^lM`ajd37aYlY=P72f_g=k|PBOSw71&u_QotjYXaEB@@g!sGoTPYYS<YeZh
z7Axdt78hscfoz%lHATMOGq(WZ8ilmdyyTM1{5*xE{FF*1O<pcuu9Va?g~XH;jYOR!
zO)EqQC_p@4Ql77nSDKrYT2!o%keHy5n3tlEkd&ZMl95@AFb(S9;>_HFoJyFxOEOY3
zixi4WbFHl)zO{qfqhPC$sGWoeX#DP%z~%nQIm}o1Krwmrq<`R7CI*JDOq=~!4l=SP
zrljz4O=sj|RNm~%ro+e!QuBP%3fU4)Muw8+Humjp?2HL)+yC$~-eF`ivz&g5pRu1M
zHOazkdaeND8L)DA;+XCx$Y?ANG67)=1A_o45kc{`#<b}(1R3SQis068GBSuk6@aL1
Sjh)l42{LLj8cq{pJOlt!;lUdK

delta 879
zcmX@VgyqmumI>mFA`>M{7)2)fT1eLjGBN~sGqZ>=fPiY4RPgWL!M(N23=Fjs7j)Gt
zV*$Ke$_jy{sl_Fk`FRS4dJ5r1nI)+Ti3(|@dC6e0qSUg~qT<x}oXp}9h4PHd<P3$x
z<mA+Xl46BK1(2YQLSkNuLQ!f-X;B_XEHAYjEDkccJhLQ2p(G<!p*S%&wO%1LCp9-U
zucTNZskB5PGfyGEpdh~(WM6(!N@|fFFBcXED)Vxsq^2n#J3}M45@fNa6)%^9f`Wpw
zLWsYMzm-B{erb_HaehuI*xebaMX3<^qSWHjoDv1wdWGm%h)7y~kwP}e0T6?r=77wC
zsa1&1j#bcBfN*)aAle{)L=pr!tT;2bAO{jW@rBTU)POmypeQr1L_;YgBUPa&wH_Rh
z;IKgiZ*pQD%x}=}RMLdlmZM;+5N)WVV5Fm9tfOE8bvo2UA8>3W7F#LlDCFoUC_#fK
zMFC_-W`3TM4$M?=#A)PcYVx{sL4q<dB}F4qCrK0P?ILi@DI{tqf&2;bQf_5@acQnb
zPBEGisNm#NY$}tznAh`x5*UZT(r#{M1_o~C&Fm}(86}|(0o$sh05M5NAu%Oo@&{Ih
z&97Mv8F@k4-Z_>m>*8c&=xWwv->%8dn81cx=k$%dj0)S=@G|aXWHL#cp3cwM&yr}F
zW-$F1KjRs&Y4B7&{jva~u{_8Egu56R1VFg~inlecovtd#D6arg1a}1|BZC-J0f^ex
Y_>66`9=pW!8bL+@#wF8x1sM+k0JT#AYybcN

diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc
index 386653c65aa25df8b3bd8067a58ce228063ee330..28480881792e704c174bfa27c008fa2e0e642016 100644
GIT binary patch
delta 657
zcmdnN@qnE-l$V!_fq{WRDMCHbY$C60fENP;LkdF_LkeRGQwu{BV+wN$OAA93Qz~-`
zYc^AnTPjNmTPjludkT9q(`<&hOwEjp45=(B9I1>c>?xcnTq)e?EX|C~OpFXEJi!c_
zyc4_1>KPdr7?>Cs7@Qdx7>aoq7#K<zQW%>Vn;27=f*CZKs}^x7C@3hnXO^YrDI_Z7
zWEPhwsOMIKXmy36)RNMoykeMma(-TMW^qYoUb;elnnFoNszPyMZmL3RPHJvyUP-Y+
zW}ZS(YFTPgajHUoQA%o&o`S1heYzgl0;ohh$Y70VLmeX><5*3F;*9*#oD`@%3em<o
zMmmPE3L1$pD>WgSe~EkM7C>xKNGr`tF3HT#Q%K5Bsq|aPcuUwXzXUFwn&MhilwYLD
z7{vzh1j|Z>B9_U1j0L<bpa1}c1Or<U+vMX+vh`R(BMBN3CFS`Fd8N5YsYS&K35f{`
ziFqjs2}ubGB^jB;2q!?DU!0j+kW&fsMM*|#W|2a1X|5I6<8}&(DJdF>I!T%ewhD>b
zNeV@v81XA&V$fz_&}6#Bn0SjZsR-ma2*Jw0z;KH>F(n1$x?-?<S&BF(H!-U*y=0oa
zidov}B{Ks9!%L95m+TA-41Sudx7g$3Q}UDJ<BQlB7#NB;K}K-I$LA&HrpCwLVl61j
d%qxMI4zXu4KZ`gYDCw~<b8v95aWIK60sspQu0a3*

delta 763
zcmaFBzJr4|l$V!_fq{XcEmS?Rb|SBALKFi7LkdF_LkeRGQwu{BV+wN$OAA93QwnPe
zTMI)Ja|(M3M+-v~ODby$XEsw&SSnizS1NM~cM5ki(`<&hOwEjp45@4>JgJN++$p>%
zd@20tEX|C~OpFXE0>KQLf)lIC{FoUS7(m#Wfq|h|hJk@0ouQVohQWm)mamqnhAEw~
zhEbfMma&kdhB267C8OU;22JKDw%p2`%;J(;Y(=TXr8y<H7_*C*7#J9em?nQ>EU4!I
z83iI3m{=G&7<rhA*cccXl0jaBVMYc929O}wRxJhwh7yJpMv!w;m_VLPVNGYL1&OiM
zFr=`TFxD_MGd8m@GNdpBGiY-7Rq2Feq$(7pmZcUIrz(J4qL5aamt2yWpQn(VSf2-%
zN(4*31ljfy<iM98pT1;aU|^`?^vNtPQ7BI=wyF|<nv|jdGB7hgPm}2uYe7+FUJ2OY
znvAy?bD;hMd6JcZf#H@&ab|8oPHH^J^7z8i)MAk30w9mGaf3Vw@;74<@8o<YS#WsN
zFo41_n4t*d7O-KOOt%;lZ!so;IS>MDGIL@|iYUk+kb@Z*i$I1ZgLHw61z`{yBnJ*V
zP%xJ;fWilAwkG2(=Hk*^O~xpuoMN!C@?f$EY&ctPWqfgIt^|_#LX*vy)tHJnCdV;L
z7Z-6dFfhCXDSgS#z`)?A$$pDHK0YNsIX)g77Dc?E5CA!;NC*@R9P#maiMgrq@kJnC
sLo5Qh4Z;GMbBn_!H$SB`C)Ez*?qWU$1_l;(4h{}h4rUH!4ki&s0A+}oRsaA1

diff --git a/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc
index 29b97cd4ae665c681a0729f0b00a201c6324a7ea..c296ad40721b7cfc0c42277ed6eca9f2b2158a2c 100644
GIT binary patch
delta 289
zcmZqRJjcNs%FD~ez`($u6rrBjw2{|?(U^&Wfq|8Qfx(%9fuY!dfq@}~F@-6IA(ye1
zk&&T>VF6POV+wN$b2HOIIDhg4MsXj;Tg-_mDKA0VGw1yO|NkY(5KZP={K>_J2F7|B
z`MIg(`9;}=MH~za47Yd^i;Gi>N?Z#|6LXw3nQpNZr{<)ArB*T&@lBRxDre-GJeA2s
zRDgkjp@<Ka;Gg`Csg_$1Bq#(TgeR9V8)%{{0?CLnFfj0gJi^Anz`()C!pOx~Bs%##
TbB??uOu7hc4!Y*aQ7mcz{1G|S

delta 341
zcmX@d(ZI<Y%FD~ez`(%J7OI{&V<WEzV<rm&0|P4q1A{XI14FR|0|P?}V+vCaLoQ=2
zV=hxIQ!aBYGb2L{!vdBX<`m`>=4PgajBq|n3M+)4!Vt`$$>vwY$iTp$$$X2aD77rL
zs5mt~C$qTZ7F%v*d~s>+<OPhPUaYtHlZy=vjP)||b5qOni?R)iI2afhia0?8$Yr;9
z5{rvdi%MJzOA~XPHCb-46sP8-p($F)P$V$<HDfuW$mC=u7f~UQW&u<}aPl6eT5e&G
zpa_T%o$SJFpoy*sBqPqiz`zgkJR1W80|z4uBNt<l_~g0FIr7pl=_0T>=$a>Mu&4n5
Dke5TQ

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc
index b14cd695060a09e05254b731f1807e3ddf74cc65..6059825c3966108ca837436041e85c8db3a0d602 100644
GIT binary patch
delta 401
zcmX@f)z8Hj%FD~ez`(#D6QQ2?muVwkFk>PU0|Nsq0|SFI0|P^`0Rsa=3S$aW4nr<u
zEh8gC4Z{Ma8pagn6y|28g$O=N3M+)4!Vt`$$>vwY$iTp`lJOREVoJ(Okb#+V{{R2~
z5@hU3=E)Bj#pI)S5{rvdi%MJzOA~XPS2EpVDNfBvE8+y{V<_UEtjW~C$TN8rlR_j&
zJ4hqgJqiTe1Gb+9Vt)|_0|P@5$a_URAQmr(;D-|eleL-U8F?mqGkeMlf<)Osgb;{8
zC<3ztCU0hL<Q4|Wh=2&u$@VM;nqVmeApnvQXJB9esVfGV#lgtJ$i-MBK6wU9j=U61
Sx(Fm+gwO|O2~1XIRRaJ|L`PZx

delta 454
zcmeC@I?2Ts%FD~ez`($8CR8m^lX)XwFk?Ln0|Nsq0|SFI0|P^`1p@;^3S$aW4nr<u
zEn_ZIEmJOYEi)rS4Z{MK8s-${6y|28g^X}MOA0H5pTZE#pvmS}#K^$Fu#))}Pf==F
zYEf}&d`@O@$t||r%J|~a+*{0vDJd_R7#J8b=luWw|0T%Mm8_mo{K>_J2F7|B`MIg(
z`9;}=MVt%_3`HPk7I8B$Fhubr78j=$mADp`CgwP=WVyvsoSKt{rf4NYk>KR-j17z;
zlPj1Mv{C$|f!9xycQeUI@qldP0}=cnLJ&>}P5#Iv&nPlkkl9mS7$nLDB1Aw0LJ^oH
zG`W(wky{ibBL*VGC$q8`Xo95>gb+wZl7WE%WL7cAEDlB%MlQx8$;ojnIr1_v=_0T>
N2z_9d(BzjaY5;UnS*-v7

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc
index b179654436d68351cfc809f5f93f6c2468e946fa..cf7a9b8f4dcaf0820e8bd567cf158fadda093cda 100644
GIT binary patch
delta 62
zcmex#g!#@9X5LU<UM>a(1_q@F^~92myt9=VyEm^?p1O-AF(rj-vYw^H=7M|67@5p0
TCrdo&XGu-6Fx$NP!Er_afhHB|

delta 75
zcmca}g!$7EX5LU<UM>a(28Om!^~A1?yt9=V7j9muJaw07QEFLgQE_T~PG)h5jzVr_
gd~s>6&Su+thZ&hn(k3%K=x0f^Of%R#^}%sQ0Qxl?Pyhe`

diff --git a/examples/example_docker/students/cs103/homework1.py b/examples/example_docker/students/cs103/homework1.py
index 8da29bc..3543f1b 100644
--- a/examples/example_docker/students/cs103/homework1.py
+++ b/examples/example_docker/students/cs103/homework1.py
@@ -1,16 +1,19 @@
 """
 Example student code. This file is automatically generated from the files in the instructor directory.
 """
-def reverse_list(mylist): #!f
-    # TODO: Your solution here
-    result = []
-    for k in mylist:
-        result = [k] + result
+def reverse_list(mylist):
+    """
+    Given a list 'mylist', returns a list consisting of the same elements in reverse order. E.g.
+    reverse_list([1,2,3]) should return [3,2,1] (as a list).
+    """
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
-    return result
-
-def add(a,b): #!f
-    return a+b
+def add(a,b):
+    """ Given two numbers `a` and `b`, this function should simply return their sum:
+    > add(a,b) = a+b """
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
 if __name__ == "__main__":
     # Problem 1: Write a function which adds two numbers
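The stubs above replace the instructor reference code removed in this hunk. Solutions consistent with the docstrings (and with the removed bodies) would be:

def reverse_list(mylist):
    # Prepend each element, so the running result ends up reversed.
    result = []
    for k in mylist:
        result = [k] + result
    return result

def add(a, b):
    return a + b

print(reverse_list([1, 2, 3]))  # -> [3, 2, 1]
print(add(2, 2))                # -> 4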
diff --git a/examples/example_docker/students/cs103/report3.py b/examples/example_docker/students/cs103/report3.py
index ef52953..6eafc76 100644
--- a/examples/example_docker/students/cs103/report3.py
+++ b/examples/example_docker/students/cs103/report3.py
@@ -9,7 +9,7 @@ import random
 class Week1(UTestCase):
     """ The first question for week 1. """
     def test_add(self):
-        from cs103.homework1 import reverse_list, my_sum, add
+        from cs103.homework1 import add
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
 
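Week1.test_add relies on assertEqualC, which checks each value against the cache shipped in report1_payload rather than against a literal written into the test. A condensed sketch of the mechanism, simplified from the framework source embedded in report3_grade.py (cache persistence and timing bookkeeping elided):

import itertools
import unittest

class UTestCaseSketch(unittest.TestCase):
    _cache = {}    # expected answers, loaded from the shipped payload
    _cache2 = {}   # answers recorded during this run

    def _unique_cache_id(self):
        # Key is (class qualname, test method, call index), so repeated
        # assertEqualC calls inside one test get distinct keys.
        base = (self.__class__.__qualname__, self._testMethodName)
        for i in itertools.count():
            if base + (i,) not in self._cache2:
                return base + (i,)

    def assertEqualC(self, first, msg=None):
        key = self._unique_cache_id()
        if key not in self._cache:
            print("Warning, framework missing key", key)
        # Falls back to `first` itself when the key is missing, so an
        # unprimed cache degrades the check to a tautology.
        self.assertEqual(first, self._cache.get(key, first), msg)
        self._cache2[key] = first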
diff --git a/examples/example_docker/students/cs103/report3_grade.py b/examples/example_docker/students/cs103/report3_grade.py
index be58362..4b01996 100644
--- a/examples/example_docker/students/cs103/report3_grade.py
+++ b/examples/example_docker/students/cs103/report3_grade.py
@@ -430,8 +430,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
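The Week1/Report3 definitions at the end of the source string above are the whole student-facing pattern: a UTestCase subclass calls assertEqualC on computed values, and the expected results come from the pickled instructor cache rather than from literals in the test. The sketch below illustrates the cache-key scheme only; ToyCache and its literal answer dict are invented for illustration, while cache_id, unique_cache_id and assertEqualC mirror the methods of UTestCase in the source.

import itertools

class ToyCache:
    """Illustrative stand-in (hypothetical) for UTestCase's two caches."""
    def __init__(self, precomputed):
        self._cache = precomputed   # read-only: instructor's precomputed answers
        self._cache2 = {}           # write side: answers computed in this run
        self._class, self._method = "Week1", "test_add"

    def cache_id(self):
        return (self._class, self._method)

    def unique_cache_id(self):
        # First free (class, method, i) slot, so repeated assertEqualC calls
        # inside one test method get distinct keys.
        k0 = self.cache_id()
        for i in itertools.count():
            key = k0 + (i,)
            if key not in self._cache2:
                return key

    def assertEqualC(self, first):
        key = self.unique_cache_id()
        expected = self._cache.get(key, first)  # missing key: compare to itself
        assert first == expected, f"{first!r} != cached {expected!r} for {key}"
        self._cache2[key] = first

# Mirrors Week1.test_add above: two calls get keys (..., 0) and (..., 1).
toy = ToyCache({("Week1", "test_add", 0): 4, ("Week1", "test_add", 1): -95})
toy.assertEqualC(2 + 2)
toy.assertEqualC(-100 + 5)

In the real UTestCase a missing key additionally prints "Warning, framework missing key", and the fallback to the freshly computed value means an unseeded test cannot fail against the cache; the new answer is simply recorded.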
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f504f000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8a9f000000000075732e'
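The report1_payload blob removed above is a hex-encoded pickle of that per-question cache; source_instantiate() near the end of report1_source reverses it with pickle.loads(bytes.fromhex(payload)). A round-trip sketch, with the key shapes following unique_cache_id and _callTestMethod in the source and the values invented for illustration:

import pickle

# Instructor side: pickle the answer cache and embed it as a hex string constant.
cache = {"Week1": {("Week1", "test_add", 0): 4,
                   (("Week1", "test_add"), "time"): 0.001}}
payload_hex = pickle.dumps(cache).hex()

# Student side (as in source_instantiate): decode before Report(payload=...).
restored = pickle.loads(bytes.fromhex(payload_hex))
assert restored == cache

Embedding the answers this way is what lets report3_grade.py run offline; the .token file written by gather_upload_to_campusnet is produced the same way, as a pickled results dict.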
+report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because importing at the top makes the __version__ tag fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._stderr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._stderr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spent on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        print(tabulate(table_data))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h = [(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use a sequential test loader so the tests run in the order they are defined.
\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print("Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTU\'s exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("to CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
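The heart of the packed source above is UTestCase.assertEqualC: the first time a test runs, the computed value is recorded in a pickle-backed class cache (stored as unitgrade/<ClassName>.pkl next to the test file), and on later runs the same call compares the fresh value against that recording. A minimal standalone sketch of the record-then-compare idea follows; the file name and helper function are illustrative, not part of unitgrade:

    import os
    import pickle

    CACHE_FILE = "answers.pkl"  # hypothetical location; unitgrade derives the path from the test class

    def assert_equal_cached(key, value):
        # Load the cache; it is empty on the very first run.
        cache = {}
        if os.path.exists(CACHE_FILE):
            with open(CACHE_FILE, "rb") as f:
                cache = pickle.load(f)
        # Compare against the recorded value if one exists, then (re)record.
        if key in cache and cache[key] != value:
            raise AssertionError("%s: expected %r, got %r" % (key, cache[key], value))
        cache[key] = value
        with open(CACHE_FILE, "wb") as f:
            pickle.dump(cache, f)

    assert_equal_cached(("Week1", "test_add", 0), 4)  # first run records 4; later runs must reproduce it

The keys mirror unique_cache_id(): a (class, method, call-index) tuple, so repeated assertEqualC calls inside one test method get distinct slots.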
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5069000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8eb8600000000075732e'
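report1_payload holds the report's pre-computed answer dictionary, pickled and rendered as a hex string so that it survives as a plain string literal inside the generated _grade.py file; source_instantiate reverses this with bytes.fromhex and pickle.loads. A round-trip sketch of the encoding, with a made-up dictionary:

    import pickle

    answers = {("Week1", "test_add", 0): 4}           # made-up answer dictionary
    payload = pickle.dumps(answers).hex()             # pack: pickle, then hex-encode
    restored = pickle.loads(bytes.fromhex(payload))   # unpack, as source_instantiate does
    assert restored == answers

Hex encoding doubles the size relative to the raw pickle bytes, but keeps the generated file printable and diff-friendly.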
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
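evaluate_report in the packed source swaps unittest's default alphabetical test ordering for definition order via SequentialTestLoader, which sorts getTestCaseNames by each method's position in the class __dict__ (insertion-ordered in modern Python). A self-contained sketch of the same trick; the Demo class is illustrative:

    import unittest

    class SequentialTestLoader(unittest.TestLoader):
        def getTestCaseNames(self, testCaseClass):
            names = list(super().getTestCaseNames(testCaseClass))
            order = list(testCaseClass.__dict__.keys())  # attributes in definition order
            names.sort(key=order.index)
            return names

    class Demo(unittest.TestCase):
        def test_b(self): pass
        def test_a(self): pass

    print(unittest.TestLoader().getTestCaseNames(Demo))   # ['test_a', 'test_b'] (alphabetical)
    print(SequentialTestLoader().getTestCaseNames(Demo))  # ['test_b', 'test_a'] (definition order)

Presumably this is so the printed question items follow the order in which they appear in the report file rather than their alphabetical order.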
-- 
GitLab