From c190e1bdb091393b901d8cc99cdf80ad2884ed85 Mon Sep 17 00:00:00 2001
From: Tue Herlau <tuhe@dtu.dk>
Date: Thu, 26 Aug 2021 11:33:14 +0200
Subject: [PATCH] Reorg for new version

---
 README.md                                     | 131 +++++-------------
 .../cs103/__pycache__/report3.cpython-38.pyc  | Bin 1102 -> 1056 bytes
 .../report3_complete.cpython-38.pyc           | Bin 1295 -> 1249 bytes
 .../example_docker/instructor/cs103/deploy.py |  45 +++---
 .../instructor/cs103/report3.py               |   4 +-
 .../instructor/cs103/report3_complete.py      |   6 +-
 .../cs103/report3_complete_grade.py           |   4 +-
 .../instructor/cs103/report3_grade.py         |   4 +-
 .../tmp/cs103/Report3_handin_0_of_20.token    | Bin 0 -> 138232 bytes
 .../__pycache__/homework1.cpython-38.pyc      | Bin 1010 -> 922 bytes
 .../report3_complete_grade.cpython-38.pyc     | Bin 57811 -> 57919 bytes
 .../unitgrade-docker/tmp/cs103/homework1.py   |  33 ++---
 .../unitgrade-docker/tmp/cs103/report3.py     |   6 +-
 .../tmp/cs103/report3_complete_grade.py       |   4 +-
 .../tmp/cs103/report3_grade.py                |   4 +-
 .../cs103/Report3_handin_0_of_20.token        | Bin 70223 -> 70152 bytes
 .../__pycache__/homework1.cpython-38.pyc      | Bin 992 -> 992 bytes
 .../cs103/__pycache__/report3.cpython-38.pyc  | Bin 1102 -> 1056 bytes
 .../report3_complete.cpython-38.pyc           | Bin 1295 -> 1249 bytes
 .../__pycache__/report3_grade.cpython-38.pyc  | Bin 57948 -> 57934 bytes
 .../example_docker/students/cs103/report3.py  |   4 +-
 .../students/cs103/report3_grade.py           |   4 +-
 22 files changed, 90 insertions(+), 159 deletions(-)
 create mode 100644 examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token

diff --git a/README.md b/README.md
index 6b5b4d3..732ee86 100644
--- a/README.md
+++ b/README.md
@@ -1,110 +1,53 @@
 # Unitgrade-private
-**Do not distribute this repository, or files from this repository, to students**
-This repository contains the secret parts of the unitgrade framework. 
 
-## At a glance 
-Homework is broken down into **reports**. A report is a collection of questions which are individually scored, and each question may in turn involve multiple tests. Each report is therefore given an overall score based on a weighted average of how many tests are passed.
 
-In practice, a report consist of an ordinary python file which they simply run, and which executes a sequence of tests. An example:
-```
-python cs101report1.py
-```
-The file `cs101report1.py` is a non-obfuscated file which they can navigate and debug using a debugger. The file may contain the homework, or it may call functions the students have written.  Running the file creates console output which tells the students their current score for each test:
-```
-Starting on 02/12/2020 14:57:06
-Evaluating CS 101 Report 1
+Unitgrade is an automatic report and exam evaluation framework that enables instructors to offer automatically evaluated programming assignments.
+Unitgrade is built on Python's `unittest` framework, so tests can be specified in a familiar syntax and will integrate with any modern IDE. Beyond `unittest`, it offers the ability to collect tests into reports (for automatic evaluation) and a simple, tamper-resistant mechanism for verifying students' results and creating additional, hidden tests. A powerful cache system allows instructors to generate expected test answers automatically from a working solution (see the example below).
 
-Question 1: Reversal of list
-================================================================================
-*** q1.1) ListReversalItem..................................................PASS
-*** q1.2) ListReversalWordsItem.............................................PASS
-*** Question q1............................................................. 5/5
+ - 100% Python `unittest` compatible
+ - No external configuration files: Just write a `unittest`
+ - No unnatural limitations: Use any package or framework. If you can `unittest` it, it works.   
+ - Granular security model: 
+    - Students get public `unittest` tests for easy development of solutions
+    - Students get a tamper-resistant file to create the submissions they upload
+    - Instructors can automatically verify the student's solution using a Docker VM and run hidden tests
+ - Tests are quick to run and will integrate with your IDE
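+
+As a quick illustration of the cache system, here is a minimal sketch based on `cs103/report3.py` from this patch: `assertEqualC` checks the student's value against a cached answer computed from the instructor's working solution, so the expected output never has to be written by hand.
+```python
+from unitgrade2.unitgrade2 import UTestCase
+
+class Week1(UTestCase):
+    def test_add(self):
+        from cs103.homework1 import add
+        # The expected value is read from the cache generated by running the
+        # instructor's working solution; no hand-written answer is needed.
+        self.assertEqualC(add(2, 2))
+```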
 
-Question 2: Linear regression and Boston dataset
-================================================================================
-*** q2.1) CoefficientsItem..................................................PASS
-*** q2.2) RMSEItem..........................................................PASS
-*** Question q2........................................................... 13/13
+**Note: This is the development version of unitgrade. If you are a student, please see http://gitlab.compute.dtu.dk/tuhe/unitgrade.**
 
-Finished at 14:57:06
-Provisional evaluation
------------  -----
-Question q1  5/5
-Question q2  13/13
-Total        18/18
------------  -----
+# Using unitgrade
 
-Note your results have not yet been registered.
-To register your results, please run the file:
->>> cs101report1_grade.py
-In the same manner as you ran this file.
-```
-Once students are happy with the result, they run an alternative, not-easy-to-tamper-with script called `cs101report1_grade.py`:
+## At a glance
+Unitgrade makes the following assumptions:
+ - Your code is in Python
+ - Whatever you want to do can be specified as a `unittest`
 
-```
-python report1_grade.py
-```
-This runs the same tests, and generates a file `cs101report1.token` which they upload to campusnet. This file contains the results of the report evaluation, the script output, and optionally local files from the users system for plagiarism checking. The `.token` file itself is in a binary but easily readable format.
-
-## How to develop tests
-To develop a new test, all you need is a working version of the students homework and the api will automatically created the expected output. This saves about half the work of creating tests and ensures tests are always in sync with the code.
+Although not required, it is recommended that you maintain two versions of the code:
+ - A fully-working version (i.e., all tests pass)
+ - A public version distributed to students (with some code removed)
 
-As an examle, suppose the students write the code for the function `reverse_list` in the `cs101courseware_example/homework1.py` file.  Our test script `cs101report1.py`, which was the same file the students ran before, contains code such as (we omitted one question for brevity):
+In this example, I will use `snipper` (see http://gitlab.compute.dtu.dk/tuhe/snipper) to synchronize the two versions automatically.
+Let's look at an example. You need three files:
 ```
-class ListReversalQuestion(QuestionGroup):
-    title = "Reversal of list"
-
-    class ListReversalItem(QPrintItem):
-        l = [1, 3, 5, 1, 610]
-        def compute_answer_print(self):
-            from cs101courseware_example.homework1 import reverse_list
-            return reverse_list(self.l)
-
-    class ListReversalWordsItem(ListReversalItem):
-        l = ["hello", "world", "summer", "dog"]
-
-class Report0(Report):
-    title = "CS 101 Report 1"
-    questions = [(ListReversalQuestion, 5), ] # In this case only a single question
-    pack_imports = [homework1] # Include this file in .token file
-
-if __name__ == "__main__":
-    evaluate_report_student(Report0())    
+instructor/cs101/homework.py # This contains the student's homework
+instructor/cs101/report1.py  # This contains the tests
+instructor/cs101/deploy.py   # This deploys the tests
 ```
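+
+The synchronization itself is a single call to `snip_dir`; this is essentially how `deploy.py` in this patch does it (paths adapted to the `cs101` example):
+```python
+from snipper.snip_dir import snip_dir
+# Copy the instructor directory to the student directory, excluding the
+# deployment script and any .token files:
+snip_dir(source_dir="../cs101", dest_dir="../../students/cs101",
+         clean_destination_dir=True, exclude=['*.token', 'deploy.py'])
+```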
-This code instantiates a group of questions and run two tests on the students code: one in which a short list of integers is reversed, and another where a list of strings has to be reversed. The API contains quite a lot of flexibility when creating tests such as easy parsing of printed output.
 
-All that remains is to prepare the files which are distributed to the students. This can be done in a single line:
+### The homework
+The homework is just any old Python code.
+```python
+def add(a,b):
+   # Write a function which adds two numbers!
+   return a+b # naturally, this part would NOT be distributed to students
 ```
-if __name__ == "__main__":
-    setup_grade_file_report(Report0)
+### The test
+The test consists of individual problems and a report class. The tests themselves are just regular `unittest` test cases. For instance:
+```python
+import unittest
+
+class MyTest(unittest.TestCase):
+    def test_add(self):
+        from cs101.homework import add
+        self.assertEqual(add(2, 2), 4)
 ```
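+
+A report class then collects the test classes and assigns each a weight. A minimal sketch modeled on `cs103/report3.py` from this patch (the `cs101` names are illustrative, and `MyTest` is the test class defined above):
+```python
+from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+import cs101
+
+class Report1(Report):
+    title = "CS 101 Report 1"
+    questions = [(MyTest, 10)]  # (test class, weight)
+    pack_imports = [cs101]      # include the package source in the .token file
+
+if __name__ == "__main__":
+    evaluate_report_student(Report1())
+```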
-This code will create two files:
- - `cs101report1_grade.py`: This file contains all tests/expected output rolled into a binary tamper-resistant binary blob. Students run this file to generate their token files.
- - `Report0_resource_do_not_hand_in.dat`: This contains the expected output of the tests (i.e., the students output is compared against what is in this file)
-
-Both of these files, plus the actual tests `cs101report1.py`, are distributed to the students as is.
-
-### Why is there a seperate `.dat` file and seperate test/grading scripts?
-Ideally, tests should be a help in creating correct code, and therefore we want the publically-facing test code to be as easy to work with as possible.
-For this reason the user test script, `homework1.py`, is completely transparent and does not include any `exec(...)` magic or similar. It simply
 
- - Instantiates the Report class and loads the expected output from the `.dat` file (a simple, pickled dictionary with output)
- - Execute the tests one by one in a for loop
-
-Therefore, it will integrate well with a debugger and the homework could in principle be stated within the test script itself. 
-Transparency flies in the face of security, which is why there is an obfuscated `homework1_grade.py` file which bundles the expected output with the test code.
-
-## Security/cheating
-Any system that relies on local code execution is vulnerable to tampering. In this case, tampering will involve either changing the tests, or the `.token`file. 
-Both attempts involve figuring out what the main `_grade.py` does. The content of this file is a single line which executes a binary blob:
-```
-'''WARNING: Modifying, decompiling or otherwise tampering with this script, it's data or the resulting .token file will be investigated as a cheating attempt. 
-            Note we perform manual and static analysis of the uploaded results.
-            '''
-import bz2, base64
-exec(bz2.decompress(base64.b64decode('QlpoOTFBWSZTWWIfYn8ABUHfgERQev9/9/v//+7/////YAZc...etc.etc.')))
-```
-If this blob is decompressed, it will contain badly obfuscated source code bundled with the result of the tests, and figuring this out involves some work and there are no possibilities for accidential tampering.
 
-If the script is decompiled successfully, and the user manage to generate a tampered `.token` file, the `.token` file will still contain the source code. Since the source is collected at runtime, it will be possible to prove definitely that cheating occurred. This also adds an extra layer of security for code-copying (or simply copying someone elses .token file).
-In light of this I think the most realistic form of cheating is simply copying someone elses code. This option is available for all homework, but a benefit of this system is we are guaranteed to have the code included in an easy-to-read format.
diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc
index c296ad40721b7cfc0c42277ed6eca9f2b2158a2c..0edd5337296831e5e9223e6b60b503ad093cf02e 100644
GIT binary patch
delta 437
zcmX@dv4Dd&l$V!_fq{WxO1yfa*F;{~`Zxv#h7^Vr#vF!R#wf;IrYNRd<|t-HkQh@A
zOD<~^D<gwDLke>WOAA8^ODbbCa}-+&LkepSdlb7nLke38dkaGfdn!{ia}-A^X9`C)
zQ&ChZYbr-GLo*{ILkep!gC^%okd2zmw>U#XQj1HR6N^)Cu?3|T<QJ9PVr9w5Oi7)%
zQF`L@hsx{>3=Fq;5{rvdi%MJzOA~XPHJNU)6sP8-6@jEyG8FMne$AM}$Tm5G$=`sV
zfq|ij7nR@xaoHFc7+4q>7>Ypz2O|q(5lF;u@-?PpaRHD5K@cGXvW&GPvm__Ah;4EJ
zvx)^avqTse7(n91AR|C#aWNKw#BOoK$LA(y=EcWra!p>tEW=*J3^J5u@=<1GZ&nZ!
zq@hRx!~&@=0$U6+3CYPI*WTi=$<0qG%}KRm1UZ?9fq_AUk%viugM(3km4gug6^B%m

delta 472
zcmZ3$agKvGl$V!_fq{WRDMCH5X(F#|eGvl#LkdF*V-7<uV-zDJh|QG4l*=5&3}!Ru
zFyyjCvE;HwvF5Txv4O={a@eET-5F9?Q`lM<QrJ>inwg_GQW#QLayX+n-5FBYQ#e`}
zQaDl>o0+4yQn^z&vzdzWQkhe^ni-lwHn0RUXmY&-xk8hX;}%;`YC(Qc$;5Ti(wfY-
zI735Hi%Xmni&Jm0CMPCmq~2o5$V^G~n|SgeBgf>`j8YsN3=9k_8H)HOpJmKp<d|&5
z<gYKlz`#(%hf45+xa<rJ3~USx48<%A3=ABMER01Uk;yxmlEno<@<Jd&n1O-e7Hdgn
zNlt1J`($Hg6$@-ei83%SfXplgnFlh8i?IkKc8eoEJ~uHlFFszAXL3KY4AU*y$rG7H
zCZA^JN@E5Y53;(56~qGRyv0_On3s~D3wB76BuE%!N)gyOSnR#UVUwGmQks)$#|ZKV
W$frDvJd7fY983Zn9E<|29E<=vGg(#u

diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc
index 6059825c3966108ca837436041e85c8db3a0d602..c2a10cbaa71b99e5be06729a818f59fbbb70813a 100644
GIT binary patch
delta 504
zcmeC@ddSHa%FD~ez`(#TJ61h0mw6(eOnn>!149Z!3S$mKE@Kp9E>jd!E^`z!BS?%X
zhb5Oaij|SUogsxeg{6fdg(a1-nK_Crg&~DChdqkjogsxSg}sF#g*}z2nK_Chl{1AS
zo2e)&l{J;4nW34Hks*aOm_d{CCCExm=3AVhA*sbB&WXjTx7dPG3-XIfZn3gtWTvD}
z+$cTq`CDZU1_p*Gp2Xtf)S?pC!qUVX=ao#iSc+3~(uzP*D;bLTCVyuvWn`P2%akh5
z&cMJ>#0es}K?Dzo;DZzVlRq+vaD!xuSwPmaOcr4FvJ(KwgZM>)AQnP7n8gngV`E@o
z0I4VjS;@i3!pOnO#8?Cp^_x7MIayo?q(~S<h%hiP++r=sEXheNVw)_;qGACy1wnw#
z6Juat04Xm9838hni?IkKc8eoEJ~uHlFFszAYjQ4&40{nX$WWHaZ7j;(tRN;xLy;ti
t1yWxGwisbJm<6)o7Kcr4eoARhsvRQ(14A(=97Gs-m<0GZ7zJ247y*UxTbBR;

delta 568
zcmaFJ+0Vrn%FD~ez`(#D6QQ2?muVuOOnngp149Z!3S$mKE@KoUBZ$qE!<5S$#SCUM
z=P=~5M6u+uMzQ9yMX`a!SaR5-*xeaYSX0<q7*g0$S(=%nI8qo=SaLX{INcdi*i$%K
z7*aS=8Jn4-xKg=OIJ23G@=}>oxtbZ8K{l`iGiY+X1UW;Kk>eIyP-;PbQOU%0($bpD
zw>U#XQj1HR6N^)Cu_h-bXQbX@$;eDe^_zI|tuiMA149%~VsUY5QHg6|X=0A^N~T*Z
z#i==IMIfn_3`P8tZ!(rLa!mGTO10x)U|=W$dA5iL#Nq`J{BS~mfq~%`e@SX_Nqk~T
zN_<9UN=j-TNKG*d$YD&AKQVdP3WAigfe0ZGfzS<Rfs80(XJB9esVD|n$HB<L$id3Q
z2o{}O#hffI4ALh8B1AzJvX*3)<fIm{PyWKJVgWV;L4b`DXJB9e=_v-82{Mk0u?Qq~
ziz7ZhH!(9WK3<b&asZ3W<T4h8G-i;wAm<jbf><CeQEWwtc`5n1U>6igfrLTYi$G=;
sA?yOPKo;KOu*uC&Da}c>V`N}pC<Y1fF!C^pFmf;n@Nh5+uyQa000+}-<^TWy

diff --git a/examples/example_docker/instructor/cs103/deploy.py b/examples/example_docker/instructor/cs103/deploy.py
index cb3e657..807c37e 100644
--- a/examples/example_docker/instructor/cs103/deploy.py
+++ b/examples/example_docker/instructor/cs103/deploy.py
@@ -10,7 +10,7 @@ import glob
 import pickle
 from snipper.snip_dir import snip_dir
 
-if __name__ == "__main__":
+def deploy_student_files():
     setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False)
     Report3.reset()
 
@@ -20,31 +20,34 @@ if __name__ == "__main__":
     fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py")
     setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False)
     sdir = "../../students/cs103"
-    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['*.token', 'deploy.py', 'report3_complete*.py'])
-    # Copy files to student directory.
-    # if not os.path.isdir(sdir):
-    #     os.mkdir(sdir)
-    # for f in ['unitgrade', 'report3.py', 'report3_grade.py', 'homework1.py']:
-    #     if os.path.isdir(f):
-    #         shutil.copytree(f, sdir+"/"+f, dirs_exist_ok=True)
-    #     else:
-    #         shutil.copy(f, sdir +"/"+f)
-
-    # Run the student code. Notice this will not run all the files.
-    os.system("cd ../../students && python -m cs103.report3_grade")
-    student_token_file = glob.glob(sdir + "/*.token")[0]
-
-    # This will complile the docker image (you may want to change requirements.txt to add packages).
-    # This will take some time and you probably only want to do it once...
-    Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile"
-    os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .")
+    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True,
+             exclude=['*.token', 'deploy.py', 'report3_complete*.py'])
+    return sdir
 
+def run_student_code_on_docker(Dockerfile, student_token_file):
     token = docker_run_token_file(Dockerfile_location=Dockerfile,
                           host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp",
                           student_token_file=student_token_file,
                           instructor_grade_script="report3_complete_grade.py")
-
     with open(token, 'rb') as f:
         results = pickle.load(f)
+    return results
+
+if __name__ == "__main__":
+    # Step 1: Deploy the student files and return the directory they were written to.
+    student_directory = deploy_student_files()
+    # Step 2: Simulate that the student runs their report script and generates a .token file.
+    os.system("cd ../../students && python -m cs103.report3_grade")
+    student_token_file = glob.glob(student_directory + "/*.token")[0]
+    # Let's quickly check the student's score:
+    with open(student_token_file, 'rb') as f:
+        results = pickle.load(f)
+    print("Student's score was:", results['total'])
+
+    # Step 3: Build the Docker image (you will typically only do this once; add extra packages to requirements.txt).
+    Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile"
+    os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .")
 
-    print(results['details']) # Print output of the remote run.
\ No newline at end of file
+    # Step 4: Evaluate the student's .token file in Docker to obtain a verified results token, and compare its contents with student_token_file:
+    checked_token = run_student_code_on_docker(Dockerfile, student_token_file)
+    print("My results of the students score was", checked_token['total'])
diff --git a/examples/example_docker/instructor/cs103/report3.py b/examples/example_docker/instructor/cs103/report3.py
index 8108883..c9a23ec 100644
--- a/examples/example_docker/instructor/cs103/report3.py
+++ b/examples/example_docker/instructor/cs103/report3.py
@@ -1,7 +1,5 @@
-from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from unitgrade2.unitgrade2 import UTestCase, cache, hide
-import random
 
 class Week1(UTestCase):
     """ The first question for week 1. """
diff --git a/examples/example_docker/instructor/cs103/report3_complete.py b/examples/example_docker/instructor/cs103/report3_complete.py
index 740b4f7..37c50b9 100644
--- a/examples/example_docker/instructor/cs103/report3_complete.py
+++ b/examples/example_docker/instructor/cs103/report3_complete.py
@@ -1,7 +1,5 @@
-from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from unitgrade2.unitgrade2 import UTestCase, cache, hide
-import random
 
 class Week1(UTestCase):
     """ The first question for week 1. """
@@ -12,6 +10,8 @@ class Week1(UTestCase):
 
     @hide
     def test_add_hidden(self):
+        # This is a hidden test. The @hide decorator allows unitgrade to remove it from the student version.
+        # See the output in the student directory for more information.
         from cs103.homework1 import add
         self.assertEqualC(add(2,2))
 
diff --git a/examples/example_docker/instructor/cs103/report3_complete_grade.py b/examples/example_docker/instructor/cs103/report3_complete_grade.py
index 34e5f3a..e7244c0 100644
--- a/examples/example_docker/instructor/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_complete_grade.py
@@ -428,8 +428,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '8004951f000000000000007d948c055765656b31947d948c0474696d6594473f6067000000000073732e'
+report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f505b000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
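
The generated `*_grade.py` files in this patch all follow the same pattern: the complete framework plus the report definition is embedded as one long string (`report1_source`), the expected answers travel as a hex-encoded pickle (`report1_payload`), and `source_instantiate` execs the former and attaches the latter. A minimal sketch of that mechanism is shown below; the toy `report1_source` and payload are illustrative stand-ins, not the real embedded strings:

```python
import pickle

# Stand-in for the embedded framework + report source (hypothetical, for illustration only).
report1_source = (
    "class Report3:\n"
    "    def __init__(self, payload=None, strict=False):\n"
    "        self.payload = payload\n"
)
# Stand-in for the hex-encoded pickled answer cache.
report1_payload = pickle.dumps({'Week1': {}}).hex()

def source_instantiate(name, report1_source, payload):
    # The real script spells this eval("exec")(...) to survive minification/obfuscation;
    # a plain exec is equivalent.
    exec(report1_source, globals())                # define the Report class in this module
    pl = pickle.loads(bytes.fromhex(payload))      # decode the expected-answer cache
    return eval(name)(payload=pl, strict=True)     # instantiate e.g. Report3 with the cache attached

report = source_instantiate("Report3", report1_source, report1_payload)
```
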
diff --git a/examples/example_docker/instructor/cs103/report3_grade.py b/examples/example_docker/instructor/cs103/report3_grade.py
index bd7418c..8d21569 100644
--- a/examples/example_docker/instructor/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_grade.py
@@ -428,8 +428,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5069000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8eb8600000000075732e'
+report1_source = 'import os\n\n# Don\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
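For orientation: the generated `report3_grade.py` shown above is fully self-contained. It embeds the (minified) report source as the string `report1_source`, the pre-computed answers as the hex-encoded pickle `report1_payload`, and rebuilds the report via `source_instantiate`. A minimal sketch of that mechanism, assuming nothing beyond the standard library (`DemoReport`, `demo_source` and `demo_payload` are illustrative stand-ins, not framework names):

```python
import pickle

# Illustrative stand-ins for the embedded report1_source / report1_payload.
demo_source = '''
class DemoReport:
    def __init__(self, payload=None, strict=False):
        self.payload = payload
'''
demo_payload = pickle.dumps({"Week1": {"time": 0.1}}).hex()

def source_instantiate(name, source, payload):
    exec(source, globals())                     # define the report class from the embedded source
    pl = pickle.loads(bytes.fromhex(payload))   # restore the cached answers
    return eval(name)(payload=pl, strict=True)  # instantiate the report with its payload

report = source_instantiate("DemoReport", demo_source, demo_payload)
print(report.payload)  # -> {'Week1': {'time': 0.1}}
```

The real file spells `exec` as `eval("exec")(...)`, presumably so the call survives minification. The `.token` hand-in file added below is produced by `gather_upload_to_campusnet` from the same source: it is simply the `results` dictionary (`results['total']`, `results['details']`, and the zipped `results['sources']`) written with `pickle.dump`, so it can be inspected by unpickling it.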
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token
new file mode 100644
index 0000000000000000000000000000000000000000..09b7d0966cea33dd01451ccee5e5befd524b1dd7
GIT binary patch
literal 138232
zcmZo*nOeyJ0ku;!dRR;HOA>RYcr$p5v`y(@Pf0CF%*-jCQai<)0VK^>KE+$4hod0B
zxHvN@Cl$=ePbx{w%u7v~;?2;*npu*X3sT<0T9R3klRBk`H9R#n+i*&0aSwZOerZv1
zDo7S$7<*M_L0V=`>J+E<QcMg1-pni_V2AMfNCk5pVGl54W?%qeAqED9<YGeuWBrW$
z+|=^?qHIIGf=VSNC0;Jqip1Q4oK%J4lG2payb^`v{FGEZg^-NQVg-=N3Yo<UiKQj^
zxrrs2$%#2Rl?v&pd8tK-C8;S2X+`<D3MCn-V1>mBnR#GNW?pegQE75XevxiUW>G4L
zQOU~%wkstyO`#~YEVZaOH9jY^xI`nj5=3cQDe!VBC@3g^6@eJ;nPsVY3W*9JSp{{7
zDs_dT)RNMoykeMma(-TMW^qYoUb;el8rbUM#N1Sc)ST4Z)Vva~d!Uvn<QJu+7U?Ot
z>ZR*JY=TNaToG-kW29potEo_&kzbmV0<}jW+E~X($1qkwBN1k$CR8`voyrO!{x1Gj
z3Wf?fnR%(j3b~oZ#hH0<OA<4SQx*L3OFTg#402Rzifd6(evyU}Ttp$QG%vX%Ge1uu
zDL<uBNt2h0mkSh3i76=>i8@KhL8AZ(m6Gy&g}l<-q|~Bfg@nWeg~YrRg@mL8g%WU-
z!AygCr8qOUAg2=Mqmqo&%p!&2(p)P@DA*~$tWmI4NYqY3L;!6(mYJpyAD@?)n;IXl
zU~8+O6d#|Pn3)$JuLO0EvO+*neo{_qu7aVJLU>VTNvc93B7n*>GLthvE>=Ja0*FTn
ziZb&`G}4qJ^Gk~qic*V9b4oyI!bm||!AQYYp&H~(BON2nT2KIk5*SQF5I8l0l2D>T
zac*Kx4mimv>7XXlXd@kC9aA0iSWQh{F4SCB=Kk&9UM2<x5Eg*vvZB<2{Gt-$fyiUv
zOj(+jS(09qn38IwhhQpX=7L<M5E_zNT;iNqoT{S`1a_y6LPlmvDlZpKt??PDIR&Xj
z#YQkAQp*x^N)t;`;~^f6hZY{7#G9OxSX`_CF0wS>Hfcg5SV>7qAp~4t6cv{!6qcqI
zgOh$*evv|XYHGHEp&lX$f^tDgYH>+CDDsL^bJAdjfZ`AARB(x=2QSfJj!R5QfustM
zLXbwi#Ny)Aq7v7_(!?BR4Ulo5w4g~yk*=YEfsTSHD9%A1gN7d1QeG~IHzDC?tO226
z4g{Cm3bqPL&cO<X28Pg3RxnnA*a-7>v4X8av<5h&bQFvXG-DMMlodSll5<K^K<OS-
zkmRJI29lwHLUK`RN@hthv?MA>OwNvnxD0F@*mbdB=Mu<kIHC~h6=MyMw^5V&88LbB
z#hi=`AS{ob)Z>%$L4{mNYCJgI=@nG+azWe$Dy#}B6%vaT@(Q47tRyk1G$*kn6&7PK
zK2$m-u_U!5GZ!um<HJoXs7%XD&q*zT3xE<iD6N1!4oT&idBp{($p|I+#c<V$Md<~J
zMa8Lbf#OPpB*<d8L`g<bYGMkgcu-bI$<I?Sfw?#YETNE;tOqJ*ax?QX(<&7*i;GKB
z^$?m1Q*sqx`cm?t)g?#}8Zadwc2a6`Vrg+I!Wdn!F(99S9a#h_nxPKXb1X_P1=S+~
zAm<lpq@)%n7iAWJ@{p~%D>P3kR6+_tP+r$l*HK6<$jr%4w^dS7f>!mQ+BPI#p{O)Z
z0aW0D!VO$mCMrO3Gf1A7%TA%7vLquvPa(0mI5RyDWUFC(N&z@xdAXqa^Yd~l5h>J2
zA-@QTr)LQA1a5N_bPE-X@L2>_05Qccza$k>!$EvptdNqQTC9+lU!nj`Zz!Ht2+7A3
z)ln!dPPIaD5yZQONGX<=%Plhx)XLET84a>Au|y#=4cT;1gQy^}2wcA;D&*#;l;)%=
zXn^uzVtT5k4!Al7wTU2xD?ps9P?TC&npu=utN_zml95=V0M(@fZoKH`D&&KsQUO#P
zLIXcHu~H!^73B1^(i}ZrF1P$51yBPkF)uk)2jns&t6<Hw%sd5`{A6%CDb`QUF9o%>
zpf>44TxAT;t$GEO5GN;~Dog-*GZB^qpt`~C%u`6tNX$!5Re(3{6iV_H)Dbp9S|mlK
zd8mP{o2!6og&xE$;6zfKnyQddQc_TCrLUiopIod5@xESuQM$gdo~3>%s4&z|&P@R|
zv-C1ba&y2fN*z#8<tCPtq!z`43uIeZ*47J3EDuSoDDg<mDR9d#f~e8t<$`1>y~LE1
z_(W)4(ookeRM%0+ODsw+wpF)R*HI{`EJ(F2E-BJcNJ&jgEX^sg^~=vo)lmS|g0|}Z
zpfnFFJt1ilR-!9tr0S*XS%K57raDgRbc-|c%Tp^r#ZzjEx{iWEBDe);t5jT)Uz8eO
zQdF9%q@w_}E;u8<9GpI28uU|Ai!+N-LCsW9JBNTZpkleSgam66^NPz;iy$c;)E7{I
znox|#qSCzF(vno7e1d44L7HHCdU~25D{>R_QoyAU$WA;~6(klHC+6f3vda;r%L3Ab
zDC_hTLW@(=N^=y-Gg9*uQc{yj)6+qXc95;0@&VEShs7{j6Gx*6+GK*oIW%n)g1T)`
zHY7Np9FX0KIXO^XW_lhdilP04cu<zLbxX_vwZ%Y*BEBR)CqA{P2qFe+Pk>UUf-R`%
zC{8WX11Din>MhoQHeoW;5T$Nru>vTuf{P)LKD|OvP6m~eAYo`T4Ad}!xgS!!6zdg2
z8YdvB%rpfhJta_o4yFi6A4maASVsZuZBRP~)c!8h1m#DCGGv{4#RWN;B^pY4N}90<
z8&Y$?WiiMs7>1dtV5<N!3&sU^h(KyG(-bliixW#qir`V81WlSL@sLCd4o)Rau-`#G
z&M($0NG!?F%Pa<kGgO-%nx<l~E`%m5F@)s%B5;?^wIVqcTvTf)*(rc~bXE$WW^F-H
zsxHiU1&Eu#ePbmZ1(;hBlZruZ!tWRz1toa(mRgZnT%xDooRONG4at?z(il<?<m4oR
zs((<w1`@oPX$p|^2a0#JIEPBuDu7c4ga_9JO{iG3!DOKNpj=Q^0YwZXM;GfTlq4qQ
zq{gQtmLw|JDxhT_sDE|f3Dp)B!8)K`cX@n4QGR++YH@LVQeu%UDDoh#gQhTBa5~db
zC`&C$$}diZWDgw$s7q|2G}I_qK?$l1iuFL*3MK+o0E;4o0+@&n#8z7fr2`2YkS7Ua
zkUjA1i_8J1V2DY|3MECAR-k4+s2(p$P0uVYNi703FN;eO;ca?luw~FTBB*5xZsEii
z6lInrmZZW=0x5#3$t=oA*2_svE-5WaEyf~WkyD<TmyRL>?my)hq~;Y=R^-4Mwc+_i
z*-82N*;tGyNX$zC4TBUb6hJ#y5LbfDNzTs!*`Ar7R}3=>T5+aif?D7pr$Uscq$t?J
zDpio2MowaGQc9wgLUb&&EQ6>^1&xw`8!4c=3sk;93lE4isDYGisH31!sG|_C2`X+<
z^Gb8Uqf*d>rUy%2#jrvYq7&p4ux=wA1yG5Ns<%)NB!)!`NLxxuv^vbx_{_YN)C%=j
zy~Kio)VvgpY(o_1gOr0XNDIhB4CO{RmBVa6m|ds`8Zp7C0c1WxF^Gp>eMx=}Lb(B`
z2?@$=u%Z>DUq?Y5q)1Z%R7!(GNv|Y72de{6;|UfTxJ*S=t&VH~suavF<b+rV&LRr7
z&`ye8c}ixHg0_N^zLJ7AObirIAThm)oZ<>-Z(AAJkjylAK@6=FAjV=Ql9YH*&kt1V
zrs#oK@u?NbsW}=@<$5Lg@!<3eb15jc!Qln1D-fv-ZV-xkWJiJ1BPbyjf!fs?h6V-(
znC{6=tyHj8P%kRYi&xh!E-BK;MzI=Xe`cBjNL6N@0>n-Xr+^ftq(o<@R>msWf(N=Z
zAS$E5<{GHS>Vf)@sm0*VKS(vg8=$m6z(CYEMD`)bC=gC6N=<~7F39qr0clXdqhPB5
zD}!Kd8;A=)3W_sQQ%e+V6+mo|Vm(l+xeTQ|1chXBevS?-91x{KN{U`~Y9+{H7@h!`
z2R2(TIW;FoqbR@J)=)<wIX|Z~H_tXXKS$e86A~;6wjgtn-3{^+$j+kta%{y3G^{Zr
z6QmD|wMF^m+D5pn2IWks-B1c-3L-R$6U$Oz$rG!B<edECRCut$lz@_TL5YGdxF746
zU*eWung<!A294N&I?NzzAh7@%VKOo{foTOvfNEY?LjX%N6dKrImEa*-rGTRRvdrSl
z{Jg{*SQ{ZTKM&M=0Vx7u*kF(bqCEjk>QLF7R0Uh4#s<VWAUQ~n0#!d~7z-o^!Vq~S
z1(0nJU1^}v73fHjUV3UterZW+QCd-AZfdaxOgOo;s3<kB1T3ndsTpk;s|RXMg320*
zA?YBlf-SscNlGkE1<7ip6=_CW=^Dp^iX`|TgAzm&Qb`1E;T3Cuje(^VB<F&MKA>Yd
zpblTLLPla)DrkH{p)$2ZAt^OAPXX4HNKMgGh{*#D&OqB1kQO7niB^!4npm6)9{K?d
z9Dp2d1quz2bs!84Av-%eB^?EbgCNdDlJta3^ML1Cauf6NQj0(-r82)%p(qg~1)Vt3
z1DOj;8jzVA=*WCANEFh^Dh0J=H40(AR#wPRuvMtaEYOJ7$c$DmSC7?E$c$Ep&6Pp9
zu-P*8SV(^(6I6B<Lh@*_h9;~fk)dEqi760&gWQu>pqE&jSX7i)sgVIqiBSJ!L|f??
z#Of%3XhSG%6srUBA9y%IArv+a4;qUIDg_0W2B<{`8gtVFiGhaVA>yzw01bP`$7kkc
zmc++vfJU!%6tqCibsYsQt?Y6T3#JkjC&i@&sYM!^dT<@+YC&#+gf+nyIjFM$Yi~o?
zpxh1WyFz=YXsvHZ!yVSPhPBDTO<!1h8Y&2BPD42mZJ@D~__UnFblZ@k(o`LV;>?`X
zyb`DqNZ5n?hm64{qx2KNTJb2x+MCYHF9$V3p<~Q?dHLnALNT#8IWsdp36iF56=0*x
zdXT~9_%!f1l7<o}Qr$t5D@vg8r@RtdrIh^qTu^ccxi$$j@E?<>q?eVSnWv!;odfO@
z=YY~EviXpHF=#leSVI%k3d>2&)5y^)E-A_^(9qOWuv0LIRe+{8NUlr*6(b5|pyp0|
zJZKIzGe0jr9;zgzBp#$SFI~Y_AuqoiWKJ4rBwIsCHAP=FS6?+!LDj=b)z?Zj80Juj
zsY<~miA5!;DOL&~gW<+NjRlqXkO2=p@T3MLtg&^)p$>xfzM*=cf}k!XsF4XuR7f>K
zv4Xa(f)c1t2dWRi_M_^9*amecba)%oSpvDVSVuug12k@~s|!j>;6R29pB8H>fx;7<
zT0m0ZR0eLFDJa4GuB-qtHBHGUKQSdW1=?9J29MEODS+qyz~kqzgbW>FfD9JsC@96i
zt1Lt<h15;S^HH!>urPpl02DQ#0b^SQP>kq-2v~|MPR=g^)n?VTpnM8SKFXlMDe!n-
zacW^{YF<fZVvYi+xKPLexfC+`o0^!Srw|MpGfgdmRnd?FCm5~?RI-3QSA<;R6qi86
zkjoHIiIbjM0@47Q74!qwevpy}WHCt088oSftOw*&kR*6E7nDk11rdfpAXNw>ARz+M
z4Z<L~<iz6C_}tWzjQkXkIiMMT4Ujlok6t{usU05=Yr$cf0da+1aeh&WMs{kYEl5AI
zxq9I48Z=FVECyj{84gkl(GM{f8qBr|Sc4L3hB7QRY!#4-b6DWQVi%@Lueda`Bo!Qn
z3bqQqmBAn`EW$zU$2>?^ww%bWEXeiB3WW-`3WYGMKthG_8JQ_5sd);v3g7}8;vbNV
zvI3GEXnH9LoLV#r_2MDp=JD}53Laq1P!EE1p|$5=IT$j$rfsO8XbT_BLCV!2oyqxm
zC7F4p(6%{91RMgOP6s#<>wy9pl-`Qnit=+o3{Y%q6e3v;%b{S6NaBUyatP#gJ=jtL
z6n`S5AuOb_s!&Hk2`s1t?RggJWu_nppt3?Cq?iL44_1jVx-cFr02+nK1NB0b0%4Qa
zs+m>_s>MpG8hP4=Itmc?AvqXQ(4c77QAo{8v4s~#pztZwgEev#Y!#s821Fdz+CdYK
z2Q6!;RZv!NPb|quEmDA#B;X}DnZ*j==!H%ZgN#u|PNoP?fzm~JW=TeTerZW@Nn##o
zrW4%s02O7R4hMV~1zf&@L_qZzbeIHG%7PLFNEohAFC(!y9<)#>H3e)^Japg&<cd&G
zizf)YIzta7KtSP*628g`9*N1>l?a!Drp|0ZD;Fxj<rJjJqycG=WR_Ig8i7`9fLh03
zpW7nYq^Va_nx}!DjFdrbQ3VoAM9St+CxHEd-SjF2TLpMW3FLESaQJ0m>m))06S;7O
z^tM3R4-~Y>wI?|5!G?%1B(k8H5~K?$M?#DPIRi6aLPtkHk`QGeTjQ~(iA>0RE4UU{
zP*z9=k4S@xFOUd$Djd87DYvv3)GJYdBm&UnU~y`R0yy`;RxKc`gRnBwz}*>;yKyHZ
ztUf9RwO&Bo!OYxLl$sT6Fe3dxJO>g?PFAo5Pmy4g%uIu{PLW%gSnUOwR03M(7Y|y(
zk_%c67Z0k~K`B`eO$b({XBLCj)un=40id{pD+4DEP=BmYA+rQj@#=wwh>J^eH58%?
zjnIt)g?OP6I3Hq<D0sUO<Vb|aK|HYM9g|Bk%TfcN1MV6nw%9zP1Bo$PlzfU*tRc(=
z4*`R+M>?oY3>yG;PAn)X1rKd2E4cXksfQ>8x%&EtxhjNscm^x@c>1|w3x80;0}l&m
zsKX6V*VKUaA0U(_cmUlwr!+6S5@uU*WnM{Q1t?IV6YVMB^q8NffVMOP6bK-Ua6X6!
z8kNW99FQCc6AW^kX#}JjgbR)IKwUc2+6E+njth-2N>Bw`g^(iT$`m8hA{(NtfSQdU
z)`Ak0XI>_#HJMojTGxPBSOlLA1^Kc#BM}^FMXAuGrYMyNvOUm&BluJpXb=F_76qw8
z4PlT7G?zm=l>~zfsvOcWB&Zr>0i>#hjPHQ>AdKk@kSGX4Rog0Q85>|t*dRs77@{Ff
zDI7Fy3<}oF#GK43P=^FwB3DBj5VhbIJX{TUiVnQ8Rspo4s9Yg2PXRny3hR%6tRdvm
zR1zGi6q1puP?QQ<T?uj@XwgYAXoV5DfQGxaJQKX$F*`M<QXwrdGY33m19A!o6LO9c
zM!;dqK_nHZ$;o;psYSV&d5I<Pfl5rL6jv7O6_=#smzL<I<&+j@;LcN!2tz5sur$4}
z<|dG)+*AeIyaK%<(1@sp0=U?QRx!HZYDY%_HjM*zBsT9uT%e?&q@|FXSfOE{ljozL
zs{rcigDfb?Nrkj0H5D`!v=uazKuB8wG`bIx&P@f0Dk&7FCg<m+K)baN2?aY_1p_@p
zc&}DLQ$ZQj9D~dif$9cmPr4wn64cf%hO5NmN?RobErmQEtjVkpdsTy3m7<%ERvdxq
zM+IetU{H%7BNeoy0kq~Eo+4pOo4_?9XoVnXHE&vKQEFatDn>~JD(6sIjkXHKpf046
zmX?-6p=zd{YNjSq%RCFzJO}B4wDFO>iSB5kQ!V!T6l@8+b_cf$VAUx|3faxbt^lPX
za6Dlx>>x=AT|X$<D5&eHYeABiLY|MVi7qI86&Gu2DrjO$?qF-+Jw>G6AjoXI9YRQv
z08VJ2c0a@n497xLL7ZD$jMD|m3WPH<IGYjOy3>G;)axj~`cj&p#Uv%ExuBu__=5b*
zypm!K@VF3s&=Q4*zB~Xd2^#@ITN;36?Ez#26H;Gc#A{)EbQWmVty;ZY-3n4gm#1c?
zXO!qDAjY??;4aruP=|H=)U6cY9#e;exw;i-#db<+o{oY#%zf%s;DQ4Zi7-Lvu$3lg
zN*A=51ZFk3HC~IJ>`RJMlR+5>Ug&{ZwD^iVa8e>6vk)rY38x5<jgX`T@+fjn0_6mC
zJ#__GNvi-ZYE7WUEhsx+%?;>b0Bv<Z!WkoNgRF;?F#(Rj!QgR5XxxDIGr*D^I1?*?
ziy=2hPajto=$sEoHzap}Sea=GkN}62<yh?ksVjz#lWHhIss^wow3-3k7a)C*U@9&K
zuRj3^fUvScc`+h?=qSJvnS!kXbS?%oqKq&O?j_K?d$As9u&g+!G!LNw$pE-oNDWt%
zTC4|d<Um}2(4q`a<)G}DSE5l~tl$6_L&-Tvrh^uRffd5U@F~o#gw)y~O&aA2T5x}A
zD(J&)Qqa@{kG#X(ZKq%Wi8=$=;2bE(VD^Ego)xsfi__pjS_+1G1}H`%ya^tWiOxf0
z`f}8)4&uTLQn!M;P)9)>l(dW0trQC5k@78MU<oPD7V5!tgXSKgJID~$ql_rQyol_t
zVnYR6g*0#~g^y(wR_AFO)*>a>V$k-PG$n;<gl)C@)#b=apy|vIlmV6Wl(ZBy@_Zmw
zkfA1oHqunk1SQyFBiM`vC|n`>P+RMW!F1SKlIS$VNE_63Itpn@814bh`awKTAc2FU
z1)6qX!2(T(3b~*@H%w3y=2)a)2Bjy6DwrUs>87mUoLF1}8tH&6TLz^Oh+AN-Fz9AK
z&~g$e6Ga|o7)C_mo~we-)L>5dAkFhYY)JtPSc066T%JQ`C7>aZo0(Tyk_syPQ%dyp
z&7dQY(3U4?x~xP&7u1&qDbj*T7vvP>fW~yeYq%6Lbrg!Bbr2}BmB14^#o$y?3@#}%
zK_kY9HY+SGLefE+k~4Gz31})xTLIxeZ3Sq%7-XFW)S)^GN)VP3*e)FfC2a+;Jk-fL
z3Q7<bNM1=3)X##11(p=16apH^QPNRRQUb56fkqlEQZ%)d^id=b2?UhrKuH-ozYZFb
zRR<5Is9Py$!1N=ETTnF&-3|(>twD1?(EMBk-q{EmlPt|CL0L)(3MB{zkF&rQH^2ry
zV0>bC&OjG6!1t(R73b%{IY?V%lB$g0!UdVh**S>KG9Xou8Rev^a?l<<4Ui$A=|>%f
zlKkw{JWVSFWl&~FPc2bM1ee1K`AKP|#mSIvFsQ`|Tc!ve?@3QB0Wa%KsxksKdkRwX
z)HOB0Cg~_Bm6w2~6pIzokm|=YJyd(aH5_;r3p^GM+YhCYnOguGR)vpefZ_-m=((V5
zpIZP)7myL!Tu|2uv?c*&2GmyAYy_mZtj>kcJ-{~<rYYDefP_IyD&pf|W~P89V_-{v
zQ!<Mn%R-=e4ircQ@gUn0(?OGf=<4F3j);%f0GSNZ8y^puH`Y+sQ`d|(hy_jjLu|xh
zCa5n-q&c8v$?*B)%rvlXz@zE$@o?{gS~=>u;I%^P;IU}%L=!ZSKsli#AD@pv-av6K
zNDIU`s9#aWx{%hKffS;eO_Hzl;z7QPHb9z;K(`j`6HrbASw)YiL62@E??EFIy1W;<
z#~YTWprXYkMX8A}F{l!~sLTSlOi*40c>=01Gasf39P~Mv*{SjQNm;4M;94PH&#AH`
zwb;{NBeycXDzgB*ej92(h?`WJmX-=$C52E4l?G>CG@CULdUO=j%hfeO!-7?r1xQIm
z8MMF$nuDPxfm{n3eJsk)FVO*QEGpIkubT&N2+1$jD^JYH)<6y)B;#OuL7oHUUJzZ3
zCJ$1RnFcmeFEuZv802ydCD86%Sor`_fsE5Y=>=3BL$?rs7jr<I2eJUk-N?GikaQHK
z=77x5fJHO935YELuyO&}gE%ax$}E6}9N2+63T5E3599`T2^XynjT3cH(-RRNpiVG&
zc_(bZ3`qx))nHvnd`RL!Qj3ytz^YM1VCq3z^+6RH<ir9{)c`LUV7UwwB}nlBTf>AN
zF9cHtEQnzNnWl*ztso;n@kYG_0<RG=bC3sSK`{ZtFeNzRlC;Ev6eBpy10`w&(2{;U
zNlv{8k|WcQt5cYJVKgGH%Rnh8Ee*T?8kTC%3T14*NJEKjB<CWWn5LtE(=GVzM>Lfn
zDGQXCAkC3<NbO%*02=X(FUgNjPRuPREzV0Vfo=-WQOGYXDJU(8PsuC-@5h2A6HqM;
zTdf0a%MrPq1w2FqUu6Vcz6O~?gpV<SYZmmeC72*&&<HfY0}Urga~RPrg7R&ZwDM3@
zfOo2bC(BB~4J%L^7ZimM4X}nG@+w-Wer1KkoZ@`&3KsA*1!SfNGCcvFF$14r0WK|z
z_4M>W0Sq!XGYvd&4Q(A~=A~qoWu}xS=0I!iV(^+Zq=FaX4kf2b1xSp5CL%zSoO<A<
z9oTps(DGE!0x;0-X^<Q!j~1h>NCo9waH|Zby+~GqoC&oSw15?Ba|0^xSd<FdN)MX2
[GIT binary patch data omitted: base85-encoded contents of the newly created examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token (Bin 0 -> 138232 bytes per the diffstat); the data is not human-readable.]

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc
index 187e5df00be7311186b8ece8e68b5c995cc25dba..1f10f66e9b20dea24d1969426da24f2459b0e1f8 100644
GIT binary patch
[base85 binary data omitted: forward hunk "delta 697", reverse hunk "literal 1010"]

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc
index 27631e59efcaa3ff32a7759c35922e5cb638d13d..870a12277bd7eac55d2382ab61dbf6bf9b568faa 100644
GIT binary patch
[base85 binary data omitted: forward hunk "delta 221", reverse hunk "delta 134"]

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
index 4b1b3d0..3543f1b 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
@@ -1,28 +1,19 @@
 """
 Example student code. This file is automatically generated from the files in the instructor directory.
 """
-####################
-# Question 1. Write a function reverse_list which accepts a list, and returns a new list
-# with the same elements but in opposite order.
-####################
-def reverse_list(mylist):
-    # TODO: Your solution here
-    result = []
-    for k in mylist:
-        result = [k] + result
+def reverse_list(mylist): 
+    """
+    Given a list 'mylist', this function returns a list consisting of the same elements in reverse order. E.g.
+    reverse_list([1,2,3]) should return [3,2,1] (as a list).
+    """
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
-    return result
-
-def simple_list_question():
-    print("The reverse list function can reverse a list")
-    l = [1, 2, 3, 4]
-    print("List was:", l, "reversed version", reverse_list(l))
-
-def add(a,b):
-    return a+b
-
-def my_sum(ls):
-    return sum(ls)
+def add(a,b): 
+    """ Given two numbers `a` and `b`, this function should simply return their sum:
+    > add(a,b) = a+b """
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
 if __name__ == "__main__":
     # Problem 1: Write a function which adds two numbers
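For reference, the removed lines above show one implementation of `reverse_list`; the stubs can be completed e.g. as follows (a one-liner such as `return list(reversed(mylist))` also matches the "1 line missing" hint):

```
def reverse_list(mylist):
    # Build the reversed list by prepending each element (as in the removed code).
    result = []
    for k in mylist:
        result = [k] + result
    return result

def add(a, b):
    # Per the docstring: add(a,b) = a+b.
    return a + b
```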
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
index ef52953..7d4b431 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
@@ -1,15 +1,13 @@
 """
 Example student code. This file is automatically generated from the files in the instructor directory.
 """
-from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from unitgrade2.unitgrade2 import UTestCase, cache, hide
-import random
 
 class Week1(UTestCase):
     """ The first question for week 1. """
     def test_add(self):
-        from cs103.homework1 import reverse_list, my_sum, add
+        from cs103.homework1 import add
         self.assertEqualC(add(2,2))
         self.assertEqualC(add(-100, 5))
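`assertEqualC` is unitgrade's cache-backed variant of `assertEqual`: the test states no expected value; instead the instructor's run records the computed result in a cache, and the student's run is checked against that recording. The corresponding excerpt from the framework source (embedded as the `report1_source` string further down in this patch):

```
def assertEqualC(self, first, msg=...):
    id = self.unique_cache_id()
    if not self._cache_contains(id):
        print("Warning, framework missing key", id)
    # Compare against the cached instructor result, falling back to
    # 'first' itself when the key is absent, then record the value.
    self.assertEqual(first, self._cache_get(id, first), msg)
    self._cache_put(id, first)
```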
 
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
index ebdfa03..e7244c0 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
@@ -428,8 +428,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6063000000000075732e'
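The `report1_payload` string above is a hex-encoded pickle of the instructor's cached test answers. It is decoded by `source_instantiate`, shown in the hunk context above and defined at the end of the embedded source string:

```
def source_instantiate(name, report1_source, payload):
    eval("exec")(report1_source, globals())    # load the embedded framework and report classes
    pl = pickle.loads(bytes.fromhex(payload))  # decode the cached answers
    report = eval(name)(payload=pl, strict=True)
    return report
```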
+report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here; a top-level import would break the __version__ tag.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if getattr(self, \'_precomputed_title\', None) is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs( np.asarray(computed).flatten() - np.asarray(expected).flatten() )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are sent to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spent on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items is None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name is None:\n            self._name = self.__class__.__name__\n        return self._name\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # Time the whole run; a good proxy for setup time.\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently the length of the bar can depend on various factors, so check for the order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.__self__.__class__):  # Python 3: bound methods have no im_class attribute.\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd is None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache is None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 is None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = None) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
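
The embedded `Report3` above is what the generated script reconstructs; its matching answer cache is shipped as the hex string on the next line. On the instructor side that payload comes from running the complete report once and hex-encoding the pickled cache returned by `_setup_answers()`. A minimal sketch of that step, assuming the `cs103.report3_complete` module layout from this example (roughly what `deploy.py` in this patch automates):

```python
# Instructor-side sketch (assumes the cs103 example layout from this patch).
# _setup_answers() runs every test once and returns the per-class cache dict;
# hex-encoding its pickle yields the report1_payload string embedded below.
import pickle
from cs103.report3_complete import Report3

payload = pickle.dumps(Report3()._setup_answers()).hex()
```
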
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f505b000000000075732e'
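
Decoding the payload shows what actually ships with the grade script: a dict of cached results, titles, and timings keyed by test class and method, which is exactly what `source_instantiate` recovers via `bytes.fromhex` and `pickle.loads`. A quick way to peek at it, given the `report1_payload` variable defined above:

```python
import pickle

# Decode the embedded payload (mirrors what source_instantiate does above).
cache = pickle.loads(bytes.fromhex(report1_payload))
for key, value in cache['Week1'].items():
    print(key, "->", value)   # e.g. ('Week1', 'test_add', 0) -> 4, the cached add(2,2)
```
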
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
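
The `.token` handin itself is written by `gather_upload_to_campusnet` with a plain `pickle.dump(results, f)`, so a submission such as the `Report3_handin_0_of_20.token` added by this patch can be inspected directly. A small sketch, assuming the token file sits in the current directory:

```python
import pickle

# Inspect a student handin token (written by gather_upload_to_campusnet).
with open("Report3_handin_0_of_20.token", "rb") as f:
    results = pickle.load(f)

obtained, possible = results['total']     # overall score, here 0 of 20
print(results['details'])                 # per-question breakdown
print(results['sources'][0]['name'])      # packed module name, e.g. 'cs103'
```
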
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
index be58362..efd3403 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
@@ -430,8 +430,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("to CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import reverse_list, my_sum, add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question worth 20 credits.\n    pack_imports = [cs103]'
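# A minimal usage sketch of the embedded report source above, mirroring the
# Week1/Report3 pattern it ends with. The import path `unitgrade2` is an assumption,
# and Week2, Report4 and multiply are hypothetical names used only for illustration.
from unitgrade2 import UTestCase, Report, evaluate_report_student  # assumed import path

class Week2(UTestCase):
    """ A hypothetical question. """
    def test_multiply(self):
        def multiply(a, b):  # stand-in for student code
            return a * b
        # assertEqualC compares against the cached answer when one has been deployed,
        # and falls back to the freshly computed value (with a warning) otherwise.
        self.assertEqualC(multiply(3, 4))

class Report4(Report):
    title = "CS 101 Report 4 (hypothetical)"
    questions = [(Week2, 10)]  # one question worth 10 credits

if __name__ == "__main__":
    evaluate_report_student(Report4())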
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f504f000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8a9f000000000075732e'
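# A sketch of the payload round trip behind the report1_payload line above, assuming
# the encoder matches the decoder in source_instantiate() (a hex-encoded pickle of the
# per-question answer cache). The cache contents below are toy values, not the real
# decoded payload.
import pickle

cache = {"Week1": {(("Week1", "test_add"), "time"): 0.05}}   # hypothetical cache entries
payload_hex = pickle.dumps(cache).hex()                      # encoding assumed from the decode side
restored = pickle.loads(bytes.fromhex(payload_hex))          # exactly what source_instantiate() does
assert restored == cache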
+report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import at the top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _precomputed_title = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("to CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question worth 20 credits.\n    pack_imports = [cs103]'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
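
The generated grade script above ends by embedding the complete framework as a source string (`report1_source`) together with a hex-encoded pickle of the instructor's precomputed answers (`report1_payload`); `source_instantiate` exec's the source and unpickles the payload to rebuild the report. For illustration, a payload like this could be inspected offline roughly as sketched below; the decoding call mirrors `source_instantiate`, but the helper itself is hypothetical and not part of the patch:
```python
# Hypothetical helper (not in the patch): decode the hex-pickled answer
# cache that source_instantiate() loads via pickle.loads(bytes.fromhex(...)).
import pickle

def inspect_payload(payload_hex: str) -> None:
    # The payload maps each question's __qualname__ (e.g. 'Week1') to its
    # answer cache; cache keys are tuples such as ('Week1', 'test_add', 0).
    payload = pickle.loads(bytes.fromhex(payload_hex))
    for qualname, cache in payload.items():
        print(qualname)
        for key, value in cache.items():
            print("   ", key, "->", value)

# inspect_payload(report1_payload)
```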
diff --git a/examples/example_docker/students/cs103/Report3_handin_0_of_20.token b/examples/example_docker/students/cs103/Report3_handin_0_of_20.token
index f702bb0c28a621263baf89d48d3ca583519a6a52..347cfbde41438ca1ba78ee4d4e7953e7b8ce3108 100644
GIT binary patch
delta 201
zcmX@Vgr#E%%Y;^eLjsHp0p83kA`BqF6ti(^3NtTLj8t%$`?r64nHU)MPG)0SHCdLG
zn<pr>Ait<YM<F9KC3SKSi}d7+ELxjgSZ^?b)#q+rd?uBXks-BNlYP4;JL6f_?X0|v
z2~13;Nz>E$8T(lbOw*F5|Kevn!^9Lb{e%Fc9atOOn(4xVjD`vzo8cyLGBSvPTm{A3
Z8UxuT>#<8ruMuPvV0y(ry-$$wAOI_@I4uAG

delta 240
zcmeBJ!g78I%Y;?|PC-V70B>d%5e5+8jMz9eg_)N#LMr&^N&mpFObiTPC$q7vVhc(w
z$S*3H+`}S0`4NlO<UST%x#Yy;j8q+kjLei&Uarhski0@sVqQvqE-x1^*YpECjN+SB
zS@$!7O?$p+g=`5YBST5^N4D)B*%;5V@}g+jzK4e~iHXU~a=HRPV?Rr3l7-py&HRjK
zm^dS*HwrM?fenE>Y5ExfMnid!E8*sHGBSvPd;rDU8at=U2r_Ci8465y7i2sL08y7l
A3IG5A
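
The `.token` handin updated above is the file written by `gather_upload_to_campusnet` in the embedded source: a plain pickle of the results dictionary with keys `total`, `details` and `sources`. A short, illustrative sketch of reading one back (the file name matches this example handin; the script is not part of the patch):
```python
# Illustrative only: unpack a unitgrade .token handin (a pickled dict,
# per gather_upload_to_campusnet in the generated grade script above).
import pickle

with open("Report3_handin_0_of_20.token", "rb") as f:
    results = pickle.load(f)

obtained, possible = results["total"]      # overall score, e.g. (0, 20)
print(f"Total: {obtained}/{possible}")
for n, q in results["details"].items():    # per-question records
    print(f"  q{n + 1} {q['title']}: {q['obtained']}/{q['possible']}")
print("Packed modules:", [s["name"] for s in results["sources"].values()])
```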

diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc
index 28480881792e704c174bfa27c008fa2e0e642016..a01edf9d5079a89fbce46b7ed3c9b3bedffaec9f 100644
GIT binary patch
delta 19
acmaFB{(zk;l$V!_fq{WxYWzm7Ys>&KS_Ikv

delta 19
acmaFB{(zk;l$V!_fq{WRDPkknHD&-Vg#=&#

diff --git a/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc
index c296ad40721b7cfc0c42277ed6eca9f2b2158a2c..0edd5337296831e5e9223e6b60b503ad093cf02e 100644
GIT binary patch
delta 437
zcmX@dv4Dd&l$V!_fq{WxO1yfa*F;{~`Zxv#h7^Vr#vF!R#wf;IrYNRd<|t-HkQh@A
zOD<~^D<gwDLke>WOAA8^ODbbCa}-+&LkepSdlb7nLke38dkaGfdn!{ia}-A^X9`C)
zQ&ChZYbr-GLo*{ILkep!gC^%okd2zmw>U#XQj1HR6N^)Cu?3|T<QJ9PVr9w5Oi7)%
zQF`L@hsx{>3=Fq;5{rvdi%MJzOA~XPHJNU)6sP8-6@jEyG8FMne$AM}$Tm5G$=`sV
zfq|ij7nR@xaoHFc7+4q>7>Ypz2O|q(5lF;u@-?PpaRHD5K@cGXvW&GPvm__Ah;4EJ
zvx)^avqTse7(n91AR|C#aWNKw#BOoK$LA(y=EcWra!p>tEW=*J3^J5u@=<1GZ&nZ!
zq@hRx!~&@=0$U6+3CYPI*WTi=$<0qG%}KRm1UZ?9fq_AUk%viugM(3km4gug6^B%m

delta 472
zcmZ3$agKvGl$V!_fq{WRDMCH5X(F#|eGvl#LkdF*V-7<uV-zDJh|QG4l*=5&3}!Ru
zFyyjCvE;HwvF5Txv4O={a@eET-5F9?Q`lM<QrJ>inwg_GQW#QLayX+n-5FBYQ#e`}
zQaDl>o0+4yQn^z&vzdzWQkhe^ni-lwHn0RUXmY&-xk8hX;}%;`YC(Qc$;5Ti(wfY-
zI735Hi%Xmni&Jm0CMPCmq~2o5$V^G~n|SgeBgf>`j8YsN3=9k_8H)HOpJmKp<d|&5
z<gYKlz`#(%hf45+xa<rJ3~USx48<%A3=ABMER01Uk;yxmlEno<@<Jd&n1O-e7Hdgn
zNlt1J`($Hg6$@-ei83%SfXplgnFlh8i?IkKc8eoEJ~uHlFFszAXL3KY4AU*y$rG7H
zCZA^JN@E5Y53;(56~qGRyv0_On3s~D3wB76BuE%!N)gyOSnR#UVUwGmQks)$#|ZKV
W$frDvJd7fY983Zn9E<|29E<=vGg(#u

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc
index 6059825c3966108ca837436041e85c8db3a0d602..c2a10cbaa71b99e5be06729a818f59fbbb70813a 100644
GIT binary patch
delta 504
zcmeC@ddSHa%FD~ez`(#TJ61h0mw6(eOnn>!149Z!3S$mKE@Kp9E>jd!E^`z!BS?%X
zhb5Oaij|SUogsxeg{6fdg(a1-nK_Crg&~DChdqkjogsxSg}sF#g*}z2nK_Chl{1AS
zo2e)&l{J;4nW34Hks*aOm_d{CCCExm=3AVhA*sbB&WXjTx7dPG3-XIfZn3gtWTvD}
z+$cTq`CDZU1_p*Gp2Xtf)S?pC!qUVX=ao#iSc+3~(uzP*D;bLTCVyuvWn`P2%akh5
z&cMJ>#0es}K?Dzo;DZzVlRq+vaD!xuSwPmaOcr4FvJ(KwgZM>)AQnP7n8gngV`E@o
z0I4VjS;@i3!pOnO#8?Cp^_x7MIayo?q(~S<h%hiP++r=sEXheNVw)_;qGACy1wnw#
z6Juat04Xm9838hni?IkKc8eoEJ~uHlFFszAYjQ4&40{nX$WWHaZ7j;(tRN;xLy;ti
t1yWxGwisbJm<6)o7Kcr4eoARhsvRQ(14A(=97Gs-m<0GZ7zJ247y*UxTbBR;

delta 568
zcmaFJ+0Vrn%FD~ez`(#D6QQ2?muVuOOnngp149Z!3S$mKE@KoUBZ$qE!<5S$#SCUM
z=P=~5M6u+uMzQ9yMX`a!SaR5-*xeaYSX0<q7*g0$S(=%nI8qo=SaLX{INcdi*i$%K
z7*aS=8Jn4-xKg=OIJ23G@=}>oxtbZ8K{l`iGiY+X1UW;Kk>eIyP-;PbQOU%0($bpD
zw>U#XQj1HR6N^)Cu_h-bXQbX@$;eDe^_zI|tuiMA149%~VsUY5QHg6|X=0A^N~T*Z
z#i==IMIfn_3`P8tZ!(rLa!mGTO10x)U|=W$dA5iL#Nq`J{BS~mfq~%`e@SX_Nqk~T
zN_<9UN=j-TNKG*d$YD&AKQVdP3WAigfe0ZGfzS<Rfs80(XJB9esVD|n$HB<L$id3Q
z2o{}O#hffI4ALh8B1AzJvX*3)<fIm{PyWKJVgWV;L4b`DXJB9e=_v-82{Mk0u?Qq~
ziz7ZhH!(9WK3<b&asZ3W<T4h8G-i;wAm<jbf><CeQEWwtc`5n1U>6igfrLTYi$G=;
sA?yOPKo;KOu*uC&Da}c>V`N}pC<Y1fF!C^pFmf;n@Nh5+uyQa000+}-<^TWy

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc
index cf7a9b8f4dcaf0820e8bd567cf158fadda093cda..e31d630d54cae23263b1a11b45c49a35268949dd 100644
GIT binary patch
delta 53
zcmca}g!$YNX5LU<UM>a(28OBe>WQfvd1os#)^1*@%)V>$zPnCLOr}YbnI81B7?`Fd
KZ=U+#I3obyF%v5Q

delta 67
zcmX?ig!#@9X5LU<UM>a(1_q@F^~92myt9=VyEm^?X5Yn|nOl%wRH9Inn3s~DyLtUx
YXC@{y%gJBv_p_uXS(t5Ze{h@;0Qs&M%K!iX

diff --git a/examples/example_docker/students/cs103/report3.py b/examples/example_docker/students/cs103/report3.py
index 6eafc76..7d4b431 100644
--- a/examples/example_docker/students/cs103/report3.py
+++ b/examples/example_docker/students/cs103/report3.py
@@ -1,10 +1,8 @@
 """
 Example student code. This file is automatically generated from the files in the instructor directory.
 """
-from unitgrade2.unitgrade2 import Report
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from unitgrade2.unitgrade2 import UTestCase, cache, hide
-import random
 
 class Week1(UTestCase):
     """ The first question for week 1. """
diff --git a/examples/example_docker/students/cs103/report3_grade.py b/examples/example_docker/students/cs103/report3_grade.py
index 4b01996..efd3403 100644
--- a/examples/example_docker/students/cs103/report3_grade.py
+++ b/examples/example_docker/students/cs103/report3_grade.py
@@ -430,8 +430,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
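The packed `report1_source` string above ends with the actual cs103 example: a `UTestCase` subclass whose `assertEqualC` calls are checked against the instructor-computed cache, wrapped in a `Report`. As orientation, here is a short sketch of how those pieces fit together. `Week1`, `Report3`, `evaluate_report_student` and `_setup_answers` are all defined inside the packed source itself; the commented calls illustrate the intended instructor/student entry points and are not a verbatim excerpt:
```
class Week1(UTestCase):
    """ The first question for week 1. """
    def test_add(self):
        from cs103.homework1 import add
        # assertEqualC takes no expected value: it is read from the cache,
        # or written to it when the instructor generates the answers.
        self.assertEqualC(add(2, 2))

class Report3(Report):
    title = "CS 101 Report 3"
    questions = [(Week1, 20)]   # (question class, weight) pairs
    pack_imports = [cs103]      # package sources bundled into the .token file

# Instructor side: run the tests on a working solution to build the cache.
# payload = Report3()._setup_answers()

# Student side: evaluate the report and print the provisional score table.
# evaluate_report_student(Report3())
```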
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5069000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f8eb8600000000075732e'
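The `report1_payload` constant removed above is a hex-encoded pickle of the per-question answer cache, and the packed source defines `source_instantiate` (near its end) to turn the two constants back into a live report object. A minimal sketch of what the generated `_grade.py` scripts do with them; the decoded entries mentioned in the comment are illustrative of this payload's structure, not a verbatim dump:
```
import pickle

# The payload decodes to a dict keyed by question class name ('Week1'),
# holding that class's cache, with entries such as
# ('Week1', 'test_add', 0) -> 4 for a cached assertEqualC value.
pl = pickle.loads(bytes.fromhex(report1_payload))

# source_instantiate exec's report1_source and instantiates the named
# report class with the payload as its answer cache (strict mode).
report = source_instantiate('Report3', report1_source, report1_payload)
```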
+report1_source = 'import os\n\n# Don\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import at the top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
    args = parser.parse_args()
    if question is None and args.q is not None:
        question = args.q
        if "." in question:
            question, qitem = [int(v) for v in question.split(".")]
        else:
            question = int(question)

    # The attribute name checked with hasattr must match the attribute accessed below.
    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
        raise Exception("> Error: The pre-computed answer file %s does not exist. Check your package installation." % os.path.abspath(report.computed_answers_file))

    if unmute is None:
        unmute = args.unmute
    if passall is None:
        passall = args.passall

    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem,
                                          verbose=False, passall=passall, show_expected=args.showexpected,
                                          show_computed=args.showcomputed, unmute=unmute, show_tol_err=show_tol_err)

    # (Disabled) per-run statistics could be exported to an Excel workbook at this point.

    if question is None:
        print("Provisional evaluation")
        print(tabulate(table_data))
        print(" ")

    fr = inspect.getouterframes(inspect.currentframe())[1].filename
    gfile = os.path.basename(fr)[:-3] + "_grade.py"
    if os.path.exists(gfile):
        print("Note your results have not yet been registered. \nTo register your results, please run the file:")
        print(">>>", gfile)
        print("In the same manner as you ran this file.")

    return results


def upack(q):
    # Unpack the (weight, possible, obtained) triples of each item into three columns.
    h = [(i['w'], i['possible'], i['obtained']) for i in q.values()]
    h = np.asarray(h)
    return h[:, 0], h[:, 1], h[:, 2]


class UnitgradeTextRunner(unittest.TextTestRunner):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
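# A toy illustration of what upack returns. The example dictionary below is
# made up; it mimics the per-item score records built during grading
# ({'w': weight, 'possible': points available, 'obtained': points earned}).
def _example_upack_usage():
    q_example = {0: {'w': 10, 'possible': 2, 'obtained': 2},
                 1: {'w': 10, 'possible': 3, 'obtained': 1}}
    ws, possible, obtained = upack(q_example)
    # ws -> array([10, 10]); possible -> array([2, 3]); obtained -> array([2, 1])
    return ws, possible, obtained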
def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False,
                    show_computed=False, unmute=False, show_help_flag=True, silent=False,
                    show_progress_bar=True, show_tol_err=False):
    now = datetime.now()
    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
    b = "\n".join([l for l in ascii_banner.splitlines() if len(l.strip()) > 0])
    print(b + " v" + __version__)
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print("Started: " + dt_string)
    s = report.title
    if hasattr(report, "version") and report.version is not None:
        s += " version " + report.version
    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")
    table_data = []
    nL = 80
    t_start = time.time()
    score = {}

    # Use a sequential test loader so the tests run in the order in which they
    # are defined in the test class, rather than alphabetically.
    class SequentialTestLoader(unittest.TestLoader):
        def getTestCaseNames(self, testCaseClass):
            test_names = super().getTestCaseNames(testCaseClass)
            testcase_methods = list(testCaseClass.__dict__.keys())
            test_names.sort(key=testcase_methods.index)
            return test_names
    loader = SequentialTestLoader()

    for n, (q, w) in enumerate(report.questions):
        if question is not None and n + 1 != question:
            continue
        suite = loader.loadTestsFromTestCase(q)
        qtitle = q.__name__
        q_title_print = "Question %i: %s" % (n + 1, qtitle)
        print(q_title_print, end="")
        q.possible = 0
        q.obtained = 0
        q_ = {}  # Gather the scores of the individual items in this question.
        UTextResult.q_title_print = q_title_print  # Hacky way to pass the title on to the result class.
        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)

        # A question only gives credit if all of its tests pass.
        possible = res.testsRun
        obtained = possible - len(res.errors)
        obtained = w * int(obtained * 1.0 / possible) if possible > 0 else 0
        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle}
        q.obtained = obtained
        q.possible = possible

        s1 = f"*** Question q{n + 1}"
        s2 = f" {q.obtained}/{w}"
        print(s1 + ("." * (nL - len(s1) - len(s2))) + s2)
        print(" ")
        table_data.append([f"Question q{n + 1}", f"{q.obtained}/{w}"])

    ws, possible, obtained = upack(score)
    possible = int(msum(possible))
    obtained = int(msum(obtained))  # Cast to a python int.
    report.possible = possible
    report.obtained = obtained
    now = datetime.now()
    dt_string = now.strftime("%H:%M:%S")

    dt = int(time.time() - t_start)
    minutes = dt // 60
    seconds = dt - minutes * 60
    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")

    print("Completed: " + dt_string + " (" + plrl(minutes, "minute") + ", " + plrl(seconds, "second") + ")")

    table_data.append(["Total", str(report.obtained) + "/" + str(report.possible)])
    results = {'total': (obtained, possible), 'details': score}
    return results, table_data
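# A self-contained sketch of the sequential-loader idea used inside
# evaluate_report above. unittest normally sorts test names alphabetically;
# re-sorting them by the position of the method in the class __dict__
# (insertion order in Python 3.7+) makes the tests run in the order they were
# written. The demo test case is made up for illustration.
def _example_sequential_loading():
    class _DemoLoader(unittest.TestLoader):
        def getTestCaseNames(self, testCaseClass):
            test_names = super().getTestCaseNames(testCaseClass)
            testcase_methods = list(testCaseClass.__dict__.keys())
            test_names.sort(key=testcase_methods.index)
            return test_names

    class _DemoCase(unittest.TestCase):
        def test_b_runs_first(self):
            pass
        def test_a_runs_second(self):
            pass

    # The default loader would yield ['test_a_runs_second', 'test_b_runs_first'];
    # the sequential loader preserves the definition order instead.
    return _DemoLoader().getTestCaseNames(_DemoCase)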
from tabulate import tabulate
from datetime import datetime
import inspect
import json
import os
import bz2
import pickle


def bzwrite(json_str, token):  # Write a bz2-compressed text file; works around obfuscation issues.
    with getattr(bz2, 'open')(token, "wt") as f:
        f.write(json_str)
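# A small round-trip sketch of the bzwrite helper above, under the assumption
# that the token content is a JSON string. The file name is hypothetical.
def _example_bzwrite_roundtrip():
    payload = json.dumps({"total": (20, 20)})
    bzwrite(payload, "demo.token.bz2")   # compress and write the string
    with bz2.open("demo.token.bz2", "rt") as f:
        return json.loads(f.read())      # -> {'total': [20, 20]}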
def gather_imports(imp):
    """Collect the .py source files of the package containing the module `imp` as an in-memory zip archive."""
    import zipfile
    import io
    resources = {}
    m = imp
    # Locate the top-level package directory of the module.
    top_package = __import__(m.__name__.split('.')[0]).__path__._path[0]
    module_import = False

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w') as zf:
        for root, dirs, files in os.walk(top_package):
            for file in files:
                if file.endswith(".py"):
                    fpath = os.path.join(root, file)
                    v = os.path.relpath(fpath, os.path.dirname(top_package))
                    zf.write(fpath, v)

    resources['zipfile'] = zip_buffer.getvalue()
    resources['top_package'] = top_package
    resources['module_import'] = module_import
    return resources, top_package
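# A minimal sketch of the in-memory zip technique used by gather_imports:
# files are written into an io.BytesIO buffer instead of a file on disk, and
# the raw archive bytes are then available via getvalue(). The file name and
# content below are made up.
def _example_inmemory_zip():
    import io, zipfile
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w') as zf:
        zf.writestr("pkg/demo.py", "print('hello')")  # add a file from a string
    return buffer.getvalue()  # raw bytes of the zip archive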
def gather_upload_to_campusnet(report, output_dir=None):
    n = 80
    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)
    print(" ")
    print("=" * n)
    print("Final evaluation")
    print(tabulate(table_data))
    # Also include the source code of the uploaded files.

    if len(report.individual_imports) > 0:
        print("By uploading the .token file, you verify that the files:")
        for m in report.individual_imports:
            print(">", m.__file__)
        print("were created/modified individually by you in agreement with DTU's exam rules")
        report.pack_imports += report.individual_imports

    sources = {}
    if len(report.pack_imports) > 0:
        print("Including files in upload...")
        for k, m in enumerate(report.pack_imports):
            nimp, top_package = gather_imports(m)
            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
            nimp['report_relative_location'] = report_relative_location
            nimp['name'] = m.__name__
            sources[k] = nimp
            print(f"*** {m.__name__}")
    results['sources'] = sources

    if output_dir is None:
        output_dir = os.getcwd()

    payload_out_base = report.__class__.__name__ + "_handin"

    obtain, possible = results['total']
    vstring = "_v" + report.version if report.version is not None else ""

    token = "%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring)
    token = os.path.join(output_dir, token)
    with open(token, 'wb') as f:
        pickle.dump(results, f)

    print(" ")
    print("To get credit for your results, please upload the single file:")
    print(">", token)
    print("to CampusNet without any modifications.")


def source_instantiate(name, report1_source, payload):
    # Execute the embedded report source, then instantiate the report class
    # with the pre-computed answers decoded from the hex-encoded pickle payload.
    eval("exec")(report1_source, globals())
    pl = pickle.loads(bytes.fromhex(payload))
    report = eval(name)(payload=pl, strict=True)
    return report


__version__ = "0.9.0"


class Week1(UTestCase):
    """ The first question for week 1. """
    def test_add(self):
        from cs103.homework1 import add
        self.assertEqualC(add(2, 2))
        self.assertEqualC(add(-100, 5))


import cs103
class Report3(Report):
    title = "CS 101 Report 3"
    questions = [(Week1, 20)]  # Include a single question worth 20 credits.
    pack_imports = [cs103]
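# A hedged sketch of how a produced .token file could be inspected: it is a
# pickled dictionary whose 'total' entry holds (obtained, possible) points.
# The file name below is hypothetical.
def _example_inspect_token(token_path="Report3_handin_20_of_20.token"):
    with open(token_path, 'rb') as f:
        results = pickle.load(f)
    obtained, possible = results['total']
    return obtained, possible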
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f505b000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f805fc00000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
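# For illustration only: the hex string in report1_payload decodes to the
# dictionary of pre-computed answers that source_instantiate passes to the
# report. This mirrors what source_instantiate does internally.
def _example_decode_payload():
    return pickle.loads(bytes.fromhex(report1_payload))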
-- 
GitLab