diff --git a/autolab/autolab.py b/autolab/autolab.py
index 50586f4a4626bda3b5185222b2fea4cd944e3e75..801602c21304c2bad6f705b37c9baaacd5f4e9e7 100644
--- a/autolab/autolab.py
+++ b/autolab/autolab.py
@@ -4,6 +4,7 @@ cd ~/Autolab && bundle exec rails s -p 8000 --binding=0.0.0.0
 To remove the image:
 docker rmi tango_python_tue
 """
+import inspect
 from zipfile import ZipFile
 import os
 from os.path import basename
@@ -11,6 +12,10 @@ import os
 import shutil
 from jinja2 import Environment, FileSystemLoader
 import glob
+import pickle
+from unitgrade2.unitgrade2 import Report
+from unitgrade_private2 import docker_helpers
 
 COURSES_BASE = "/home/tuhe/Autolab/courses/AutoPopulated"
 TEMPLATE_BASE = "/home/tuhe/Documents/unitgrade_private/autolab/lab_template"
@@ -39,7 +44,6 @@ def jj_handout(source, dest, data):
 
 
 def zipFilesInDir(dirName, zipFileName, filter):
-   # create a ZipFile object
    with ZipFile(zipFileName, 'w') as zipObj:
        # Iterate over all the files in directory
        for folderName, subfolders, filenames in os.walk(dirName):
@@ -50,43 +54,98 @@ def zipFilesInDir(dirName, zipFileName, filter):
                    # Add file to zip
                    zipObj.write(filePath, basename(filePath))
 
+def paths2report(base_path, report_file):
+    """Load the module at report_file and return the Report subclass it defines (or None)."""
+    mod = ".".join(os.path.relpath(report_file[:-3], base_path).split(os.sep))
+    from importlib.machinery import SourceFileLoader
+    foo = SourceFileLoader(mod, report_file).load_module()
+    for name, obj in inspect.getmembers(foo):
+        if inspect.isclass(obj) and obj.__module__ == foo.__name__ and issubclass(obj, Report):
+            return obj
+    return None
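+# Example (illustrative): paths2report(INSTRUCTOR_BASE, INSTRUCTOR_REPORT_FILE) returns the
+# Report subclass defined in cs101/report1.py; its .title becomes the Autolab display_name.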
+
+def run_relative(file, base):
+    relative = os.path.relpath(file, base)
+    mod = os.path.normpath(relative)[:-3].split(os.sep)
+    os.system(f"cd {base} && python -m {'.'.join(mod)}")
+
+
 def deploy_assignment(base_name):
-    docker_build_image()
 
-    # Ok so what should go on here?
+    docker_build_image()
     LAB_DEST = os.path.join(COURSES_BASE, base_name)
-    STUDENT_HANDOUT_DIR = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101"
-    INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1_grade.py"
-    # STUDENT_TOKEN_FILE  = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101"
 
-    from cs101.report1 import Report1 # The instructors report class.
-    StudentReportClass = Report1.__qualname__ + "_handin.token"
-    import inspect
-    # inspect.getfile(Report1.pack_imports[0])
-    m = Report1.pack_imports[0]
-    root, relative = Report1()._import_base_relative()
+    INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1_grade.py"
+    INSTRUCTOR_BASE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor"
 
+    STUDENT_BASE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students"
+    STUDENT_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101/report1_grade.py"
 
-    z = 234
 
+    # Run the instructor grade script, then load the instructor .token (pickle) file it produces.
+    run_relative(INSTRUCTOR_GRADE_FILE, INSTRUCTOR_BASE)
+    token_path = glob.glob(os.path.dirname(INSTRUCTOR_GRADE_FILE) + "/*.token")[0]
+    with open(token_path, 'rb') as f:
+        res = pickle.load(f)
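+    # res['total'] is an (obtained, possible) pair; each res['details'][k] is a per-question
+    # dict with 'title', 'possible' and 'obtained' entries.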
 
+    # Build the Autolab problem list: a mandatory 'Total' entry plus one optional problem per question.
+    problems = [dict(name='Total', description='', max_score=res['total'][1], optional='false')]
+    for k, q in res['details'].items():
+        problems.append(dict(name=q['title'], description='', max_score=q['possible'], optional='true'))
+    print(problems)
 
+    # Preview the scores JSON that the Autolab driver will emit for this token.
+    sc = [('Total', res['total'][0])] + [(q['title'], q['obtained']) for k, q in res['details'].items()]
+    ss = ", ".join([f'"{t}": {s}' for t, s in sc])
+    scores = '{"scores": {' + ss + '}}'
+    print(scores)
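+    # e.g. scores == '{"scores": {"Total": 10, "Question 1": 5}}' (titles and values depend on the report).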
 
     # Quickly make student .token file to upload:
     # os.system(f"cd {os.path.dirname(STUDENT_HANDOUT_DIR)} && python -m cs101.{os.path.basename(INSTRUCTOR_GRADE_FILE)[:-3]}")
-    os.system(f"cd {STUDENT_HANDOUT_DIR} && python {os.path.basename(INSTRUCTOR_GRADE_FILE)}")
-
-    STUDENT_TOKEN_FILE = glob.glob(STUDENT_HANDOUT_DIR + "/*.token")[0]
+    # os.system(f"cd {STUDENT_HANDOUT_DIR} && python {os.path.basename(INSTRUCTOR_GRADE_FILE)}")
+    # handin_filename = os.path.basename(STUDENT_TOKEN_FILE)
 
-    tname = os.path.basename(STUDENT_TOKEN_FILE)
+    # Produce the student .token file, then derive the generic handin filename by
+    # stripping the trailing _<obtained>_of_<possible> score suffix.
+    run_relative(STUDENT_GRADE_FILE, STUDENT_BASE)
+    STUDENT_TOKEN_FILE = glob.glob(os.path.dirname(STUDENT_GRADE_FILE) + "/*.token")[0]
+    handin_filename = os.path.basename(STUDENT_TOKEN_FILE)
     for _ in range(3):
-        tname = tname[:tname.rfind("_")]
-    tname += ".token"
-    print("> Name of handin file", tname)
+        handin_filename = handin_filename[:handin_filename.rfind("_")]
+    handin_filename += ".token"
 
-    # Take student handout and unzip it.
-    # Take student token file, unzip it and merge files.
-    # This is the directory which is going to be handed out.
+    print("> Name of handin file", handin_filename)
     if os.path.exists(LAB_DEST):
         shutil.rmtree(LAB_DEST)
     os.mkdir(LAB_DEST)
@@ -94,61 +153,45 @@ def deploy_assignment(base_name):
 
     # Make the handout directory.
     # Start in the src directory. You should make the handout files first.
-    # jj("a", "b", data={})
-    src_dest = LAB_DEST + "/src"
-    # src_source = TEMPLATE_BASE + "/src"
-    os.mkdir(src_dest)
-
-    # unitgrade-docker
-    from unitgrade_private2 import docker_helpers
+    os.mkdir(LAB_DEST + "/src")
 
+    # Derive the report file from the grade file: .../report1_grade.py -> .../report1.py
+    INSTRUCTOR_REPORT_FILE = INSTRUCTOR_GRADE_FILE[:-9] + ".py"
     data = {
             'base_name': base_name,
-            'nice_name': base_name + "please",
-            # 'autograde_image': 'autograde_image',
+            'display_name': paths2report(INSTRUCTOR_BASE, INSTRUCTOR_REPORT_FILE).title,
+            'handin_filename': handin_filename,
             'autograde_image': 'tango_python_tue',
-            'src_files_to_handout': ['driver_python.py','student_sources.zip', tname, os.path.basename(docker_helpers.__file__),
+            'src_files_to_handout': ['driver_python.py', 'student_sources.zip', handin_filename, os.path.basename(docker_helpers.__file__),
                                      os.path.basename(INSTRUCTOR_GRADE_FILE)], # Remove handin_filename from this list later; it is only the test upload.
-            'handin_filename': 'hello3.c', # the student token file.
-            'student_token_file': tname,
             'instructor_grade_file': os.path.basename(INSTRUCTOR_GRADE_FILE),
-            'grade_file_relative_destination': relative,
+            'grade_file_relative_destination': os.path.relpath(INSTRUCTOR_GRADE_FILE, INSTRUCTOR_BASE),
+            'problems': problems,
             }
 
     # shutil.copyfile(TEMPLATE_BASE + "/hello.yml", f"{LAB_DEST}/{base_name}.yml")
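+    # Render each template against the data dict. (Assumption: jj_handout also writes the
+    # *-handout variants that the lab Makefile's handout target copies.)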
     jj_handout(TEMPLATE_BASE + "/src/README", LAB_DEST + "/src/README", data)
     jj_handout(TEMPLATE_BASE + "/src/driver_python.py", LAB_DEST + "/src/driver_python.py", data)
-    jj_handout(TEMPLATE_BASE + "/src/hello.c", LAB_DEST + "/src/hello3.c",data)
     jj_handout(TEMPLATE_BASE + "/src/Makefile", LAB_DEST + "/src/Makefile",data)
     jj_handout(TEMPLATE_BASE + "/src/driver.sh", LAB_DEST + "/src/driver.sh",data)
 
     jj(TEMPLATE_BASE + "/Makefile", LAB_DEST + "/Makefile", data)
-    shutil.copyfile(TEMPLATE_BASE + "/hello.yml", f"{LAB_DEST}/{base_name}.yml")
-    shutil.copyfile(TEMPLATE_BASE + "/autograde-Makefile", LAB_DEST + "/autograde-Makefile")
+    jj(TEMPLATE_BASE + "/autograde-Makefile", LAB_DEST + "/autograde-Makefile",data=data)
     jj(TEMPLATE_BASE + "/hello.yml", f"{LAB_DEST}/{base_name}.yml", data=data)
     jj(TEMPLATE_BASE + "/hello.rb", f"{LAB_DEST}/{base_name}.rb", data=data)
 
     # Copy the instructor grade file into the lab src directory.
     shutil.copyfile(INSTRUCTOR_GRADE_FILE, f"{LAB_DEST}/src/{os.path.basename(INSTRUCTOR_GRADE_FILE)}")
-    shutil.copyfile(STUDENT_TOKEN_FILE, f"{LAB_DEST}/src/{os.path.basename(STUDENT_TOKEN_FILE)}")
-    shutil.copyfile(STUDENT_TOKEN_FILE, f"{LAB_DEST}/src/{tname}")
-    # zipFilesInDir(STUDENT_HANDOUT_DIR, LAB_DEST + '/student_sources.zip', lambda name: True)
-    # Make a zip file of all the students (handed out) sources.
-    shutil.make_archive(LAB_DEST + '/src/student_sources', 'zip', root_dir=os.path.dirname(STUDENT_HANDOUT_DIR), base_dir='cs101')
-    # Take the (student) .token file and unpack sources into the student_sources.zip directory.
-    # docker_helpers
-
+    shutil.copyfile(STUDENT_TOKEN_FILE, f"{LAB_DEST}/src/{handin_filename}")
+    shutil.make_archive(LAB_DEST + '/src/student_sources', 'zip', root_dir=STUDENT_BASE, base_dir='cs101')
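+    # student_sources.zip packs the handed-out cs101 package; it is listed in
+    # src_files_to_handout so it ships with the autograder.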
     shutil.copyfile(docker_helpers.__file__, f"{LAB_DEST}/src/{os.path.basename(docker_helpers.__file__)}")
-
-
     os.mkdir(LAB_DEST +"/handin")
     os.mkdir(LAB_DEST +"/test-autograder") # Otherwise make clean will screw up.
-
     os.system(f"cd {LAB_DEST} && make && cd {CURDIR}")
-
     print("Deploy", base_name)
 
 if __name__ == "__main__":
-    # print("Hello there handsome")
     print("Deploying to", COURSES_BASE)
-    deploy_assignment("hello3")
+    deploy_assignment("hello4")
diff --git a/autolab/lab_template/Makefile b/autolab/lab_template/Makefile
index 4178c87f45b0fd05cd4fc16f38b6c0720e3cc7a2..48023fea7db9f22c976bdfcfe840ebec896302fd 100644
--- a/autolab/lab_template/Makefile
+++ b/autolab/lab_template/Makefile
@@ -12,8 +12,8 @@ handout:
 	(rm -rf $(LAB)-handout; mkdir $(LAB)-handout)
 	cp -p src/Makefile-handout $(LAB)-handout/Makefile
 	cp -p src/README-handout $(LAB)-handout/README
-	cp -p src/hello3.c-handout $(LAB)-handout/hello3.c
-	cp -p src/driver.sh $(LAB)-handout
 {%- for f in src_files_to_handout %}
 	cp -p src/{{f}} $(LAB)-handout
 {% endfor %}
diff --git a/autolab/lab_template/autograde-Makefile b/autolab/lab_template/autograde-Makefile
index 8843390eba53111f210f9d8740453bd94a109717..55d8a7d5b5059d6cd28c8c3a3e157019e175e8d5 100644
--- a/autolab/lab_template/autograde-Makefile
+++ b/autolab/lab_template/autograde-Makefile
@@ -1,7 +1,7 @@
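+# Autolab flow: unpack autograde.tar, copy the student's handin next to the handout sources,
+# then run the Python driver to grade it.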
 all:
-	tar xvf autograde.tar
-	cp hello3.c hello3-handout
-	(cd hello3-handout; sh driver.sh)
+	tar xf autograde.tar
+	cp {{handin_filename}} {{base_name}}-handout
+	(cd {{base_name}}-handout; python3 driver_python.py)
 
 clean:
-	rm -rf *~ hello3-handout
+	rm -rf *~ {{base_name}}-handout
diff --git a/autolab/lab_template/hello.rb b/autolab/lab_template/hello.rb
index a28c0262ddf576779fa1eed86aec569bdbb165ae..cb907a33eb23a37e78b819e735f6f15026eaf784 100644
--- a/autolab/lab_template/hello.rb
+++ b/autolab/lab_template/hello.rb
@@ -1,10 +1,10 @@
 require "AssessmentBase.rb"
 
-module Hello3
+module {{base_name|capitalize}}
   include AssessmentBase
 
   def assessmentInitialize(course)
-    super("hello3",course)
+    super("{{base_name}}",course)
     @problems = []
   end
 
diff --git a/autolab/lab_template/hello.yml b/autolab/lab_template/hello.yml
index b545df5f7b458729ada0d9cc99cce3638333be7d..f5df4d46cb62f783efef5694ddffc4ca139feaca 100644
--- a/autolab/lab_template/hello.yml
+++ b/autolab/lab_template/hello.yml
@@ -1,24 +1,33 @@
 ---
+
 general:
   name: {{ base_name }}
   description: ''
-  display_name: Hello3
-  handin_filename: hello3.c
+  display_name: {{ display_name }}
+  handin_filename: {{ handin_filename }}
   handin_directory: handin
   max_grace_days: 0
-  handout: hello3-handout.tar
-  writeup: writeup/hello3.html
+  handout: {{ base_name }}-handout.tar
+  writeup: writeup/{{base_name}}.html
   max_submissions: -1
   disable_handins: false
   max_size: 2
   has_svn: false
   category_name: Lab
 problems:
-- name: Correctness
-  description: ''
-  max_score: 100.0
-  optional: false
+{% for p in problems %}
+  - name: {{ p.name }}
+    description: '{{p.description}}'
+    max_score: {{p.max_score}}
+    optional: {{p.optional}}
+{% endfor %}
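+# Rendered example (numbers come from the instructor token; illustrative only):
+#   - name: Total
+#     description: ''
+#     max_score: 10
+#     optional: false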
 autograder:
   autograde_timeout: 180
   autograde_image: {{ autograde_image }}
   release_score: true
diff --git a/autolab/lab_template/src/Makefile b/autolab/lab_template/src/Makefile
index d815a12396de018ab7430a77d469645519047ee8..6ca51a4d948b8e6fc738206a8f9b41051f3ffb1e 100644
--- a/autolab/lab_template/src/Makefile
+++ b/autolab/lab_template/src/Makefile
@@ -1,6 +1,7 @@
 # Makefile for the Hello Lab
-all: 
-	gcc hello3.c -o hello3
+all:
+	echo "Makefile called... it is empty so far. "
+	#gcc hello3.c -o hello3
 
 clean:
 	rm -rf *~ hello3
diff --git a/autolab/lab_template/src/Makefile-handout b/autolab/lab_template/src/Makefile-handout
index f68bf2618e4c5e8b5e81373621f8b1f85f21b750..6c57d658509d743106c3f37a6cbd9c69763e4b32 100644
--- a/autolab/lab_template/src/Makefile-handout
+++ b/autolab/lab_template/src/Makefile-handout
@@ -1,5 +1,6 @@
 # Student makefile for the Hello Lab
-all: 
+all:
+    echo "handout makefile called.."
 	gcc hello.c -o hello
 
 clean:
diff --git a/autolab/lab_template/src/driver.sh b/autolab/lab_template/src/driver.sh
index 2155ec80b0493e177351190f1df706221c060a22..1d6a6f74271325b503ca9a0319e40668f69219cb 100755
--- a/autolab/lab_template/src/driver.sh
+++ b/autolab/lab_template/src/driver.sh
@@ -10,25 +10,25 @@
 # python3 --version
 python3 driver_python.py
 
-(make clean; make)
-status=$?
-if [ ${status} -ne 0 ]; then
-    echo "Failure: Unable to compile hello3.c (return status = ${status})"
-    echo "{\"scores\": {\"Correctness\": 0}}"
-    exit
-fi
-
-# Run the code
-echo "Running ./hello3"
-./hello3
-status=$?
-if [ ${status} -eq 0 ]; then
-    echo "Success: ./hello3 runs with an exit status of 0"
-    echo "{\"scores\": {\"Correctness\": 100}}"
-else
-    echo "Failure: ./hello fails or returns nonzero exit status of ${status}"
-    echo "{\"scores\": {\"Correctness\": 0}}"
-fi
 
 exit
 
diff --git a/autolab/lab_template/src/driver_python.py b/autolab/lab_template/src/driver_python.py
index a8798138d6d80c3384f5deeee449f7277a056eda..93ecb2c89dfc178f74f4507558d32ebddbe70ea6 100644
--- a/autolab/lab_template/src/driver_python.py
+++ b/autolab/lab_template/src/driver_python.py
@@ -1,19 +1,20 @@
-print("="*10)
-tag = "[driver_python.py]"
-print(tag, "I am going to have a chamor of a time grading your file!")
 import os
 import glob
-import shutil
 import sys
 import pickle
 # import io
+import subprocess
+import docker_helpers
 import time
+
+print("="*10)
+tag = "[driver_python.py]"
+print(tag, "I am going to have a chamor of a time evaluating your stuff")
+
 sys.stderr = sys.stdout
 wdir = os.getcwd()
 
 # print(os.system("cd"))
-
-
 def pfiles():
     print("> Files in dir:")
     for f in glob.glob(wdir + "/*"):
@@ -21,7 +22,7 @@ def pfiles():
     print("---")
 
 # shutil.unpack_archive("student_sources.zip")
-student_token_file = '{{student_token_file}}'
+student_token_file = '{{handin_filename}}'
 instructor_grade_script = '{{instructor_grade_file}}'
 grade_file_relative_destination = "{{grade_file_relative_destination}}"
 with open(student_token_file, 'rb') as f:
@@ -30,55 +31,49 @@ sources = results['sources'][0]
 pfiles()
 
 host_tmp_dir = wdir + "/tmp"
-import subprocess
-import docker_helpers
 print(f"{host_tmp_dir=}")
 print(f"{student_token_file=}")
 print(f"{instructor_grade_script=}")
 command, token = docker_helpers.student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade_script, grade_file_relative_destination)
-command = f"cd tmp && {command}"
-
+command = f"cd tmp && {command} --noprogress --autolab"
 
 def rcom(cm):
-    print(f"running... ", cm)
-    start = time.time()
+    # print(f"running... ", cm)
+    # start = time.time()
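+    # Run the shell command; echo its stdout and surface any stderr.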
     rs = subprocess.run(cm, capture_output=True, text=True, shell=True)
-    print(rs)
-    print("result of running command was", rs.stdout, "err", rs.stderr, "time", time.time() - start)
-rcom("ls")
-rcom('python3 --version')
-rcom('python --version')
-
-
-
+    print(rs.stdout)
+    if len(rs.stderr) > 0:
+        print("There were errors in executing the file:")
+        print(rs.stderr)
 
 start = time.time()
 rcom(command)
-# print("Calling sub process...")
-# result = subprocess.run(command.split(), capture_output=True, text=True, shell=True).stdout
-# print("result of running command was", result, "time", time.time() - start)
-
-
-import time
-time.sleep(1)
-# print("> Files in dir:")
-pfiles()
-for f in glob.glob(host_tmp_dir + "/cs101/*"):
-    print("cs101/", f)
-print("---")
 
 print(f"{token=}")
 ls = glob.glob(token)
-print(ls)
 f = ls[0]
 with open(f, 'rb') as f:
     results = pickle.load(f)
-print("results")
+# print("results")
+# print(results.keys())
 print(results['total'])
-
 # if os.path.exists(host_tmp_dir):
 #     shutil.rmtree(host_tmp_dir)
 # with io.BytesIO(sources['zipfile']) as zb:
 #     with zipfile.ZipFile(zb) as zip:
 #         zip.extractall(host_tmp_dir
-print("="*10)
\ No newline at end of file
+# print("="*10)
+# print('{"scores": {"Correctness": 100,  "Problem 1": 4}}')
+
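+# Emit Autolab's scores JSON, e.g. {"scores": {"Total": 10, "Question 1": 5}} (names illustrative).
+# The '{"_presentation": "semantic"}' line is assumed to switch Autolab to semantic feedback rendering.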
+sc = [('Total', results['total'][0])] + [(q['title'], q['obtained']) for k, q in results['details'].items()]
+ss = ", ".join([f'"{t}": {s}' for t, s in sc])
+scores = '{"scores": {' + ss + '}}'
+print('{"_presentation": "semantic"}')
+print(scores)
diff --git a/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-39.pyc b/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59aec17144bee92c76e761ed5a8bcfd0b24ba30c
Binary files /dev/null and b/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-39.pyc differ
diff --git a/examples/example_framework/instructor/cs102/report2.py b/examples/example_framework/instructor/cs102/report2.py
index 832a117873c46bfa3512bbefa1166edaf1ca8147..1b00e8413534498cb492b01ec4bfc63c7e431f17 100644
--- a/examples/example_framework/instructor/cs102/report2.py
+++ b/examples/example_framework/instructor/cs102/report2.py
@@ -16,7 +16,6 @@ class Week1(UTestCase):
         self.assertEqualC(reverse_list([1,2,3]))
 
 
-
 class Question2(UTestCase):
     """ Second problem """
     @cache
diff --git a/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token b/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token
index 26332275bf4a15800ec28f4e3cde99ee1c23b810..0a688a52f5136ace5fb186affec815bdeb67c268 100644
Binary files a/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token and b/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token differ
diff --git a/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-39.pyc b/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..320b3cf336a65641c1c544822f0a7b397f2c8859
Binary files /dev/null and b/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-39.pyc differ
diff --git a/examples/example_simplest/instructor/cs101/report1_grade.py b/examples/example_simplest/instructor/cs101/report1_grade.py
index d844649a19b8ec65fbfd2a0142364b1e978a59dc..1a8a049ab1900d22c089909ffc8b259f8cd75d73 100644
--- a/examples/example_simplest/instructor/cs101/report1_grade.py
+++ b/examples/example_simplest/instructor/cs101/report1_grade.py
@@ -40,8 +40,6 @@ parser.add_argument('--showcomputed',  action="store_true",  help='Show the answ
 parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
 parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
 
-
-
 def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
     args = parser.parse_args()
     if question is None and args.q is not None:
@@ -138,13 +136,24 @@ class UnitgradeTextRunner(unittest.TextTestRunner):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
+class SequentialTestLoader(unittest.TestLoader):
+    def getTestCaseNames(self, testCaseClass):
+        test_names = super().getTestCaseNames(testCaseClass)
+        testcase_methods = list(testCaseClass.__dict__.keys())
+        test_names.sort(key=testcase_methods.index)
+        return test_names
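+# SequentialTestLoader sorts test names by their position in the class body, replacing
+# unittest's default alphabetical ordering so questions run in definition order.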
 
 def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
                     show_progress_bar=True,
-                    show_tol_err=False):
+                    show_tol_err=False,
+                    big_header=True):
+
     now = datetime.now()
-    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
-    b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    if big_header:
+        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
+        b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    else:
+        b = "Unitgrade"
     print(b + " v" + __version__)
     dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
     print("Started: " + dt_string)
@@ -157,17 +166,7 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
     nL = 80
     t_start = time.time()
     score = {}
-
-    # Use the sequential test loader instead. See here:
-    class SequentialTestLoader(unittest.TestLoader):
-        def getTestCaseNames(self, testCaseClass):
-            test_names = super().getTestCaseNames(testCaseClass)
-            testcase_methods = list(testCaseClass.__dict__.keys())
-            test_names.sort(key=testcase_methods.index)
-            return test_names
     loader = SequentialTestLoader()
-    # loader = unittest.TestLoader()
-    # loader.suiteClass = MySuite
 
     for n, (q, w) in enumerate(report.questions):
         # q = q()
@@ -188,6 +187,8 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         # unittest.Te
         # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
         UTextResult.q_title_print = q_title_print # Hacky
+        UTextResult.show_progress_bar = show_progress_bar # Hacky.
+
         res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
         # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)
-        z = 234
@@ -262,14 +263,15 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         # ws, possible, obtained = upack(q_)
 
         possible = res.testsRun
-        obtained = possible - len(res.errors)
+        obtained = len(res.successes)
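+        # Count only explicit successes so errors and failures cannot inflate the score.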
 
+        assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun
 
         # possible = int(ws @ possible)
         # obtained = int(ws @ obtained)
         # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0
 
-        obtained = w * int(obtained * 1.0 / possible )
+        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
         score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle}
         q.obtained = obtained
         q.possible = possible
@@ -368,38 +370,55 @@ def gather_imports(imp):
             resources[v] = ff.read()
     return resources
 
+# Register the autolab-specific flags on the module-level parser defined above; creating a
+# second ArgumentParser here would shadow it and break the -q/--unmute flags parsed by
+# evaluate_report_student.
+parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars')
+parser.add_argument('--autolab',  action="store_true",  help='Show Autolab results')
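+# e.g. driver_python.py invokes this script with `--noprogress --autolab` during autograding.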
 
 def gather_upload_to_campusnet(report, output_dir=None):
-    n = 80
-    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)
+    n = report.nL
+    args = parser.parse_args()
+    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,
+                                          show_progress_bar=not args.noprogress,
+                                          big_header=not args.autolab)
     print(" ")
     print("="*n)
     print("Final evaluation")
     print(tabulate(table_data))
     # also load the source code of missing files...
 
-    if len(report.individual_imports) > 0:
-        print("By uploading the .token file, you verify the files:")
-        for m in report.individual_imports:
-            print(">", m.__file__)
-        print("Are created/modified individually by you in agreement with DTUs exam rules")
-        report.pack_imports += report.individual_imports
-
     sources = {}
-    if len(report.pack_imports) > 0:
-        print("Including files in upload...")
-        for k, m in enumerate(report.pack_imports):
-            nimp, top_package = gather_imports(m)
-            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
-            nimp['report_relative_location'] = report_relative_location
-            nimp['name'] = m.__name__
-            sources[k] = nimp
-            # if len([k for k in nimp if k not in sources]) > 0:
-            print(f"*** {m.__name__}")
-            # sources = {**sources, **nimp}
-    results['sources'] = sources
 
-    # json_str = json.dumps(results, indent=4)
+    if not args.autolab:
+        if len(report.individual_imports) > 0:
+            print("By uploading the .token file, you verify the files:")
+            for m in report.individual_imports:
+                print(">", m.__file__)
+            print("Are created/modified individually by you in agreement with DTUs exam rules")
+            report.pack_imports += report.individual_imports
+
+        if len(report.pack_imports) > 0:
+            print("Including files in upload...")
+            for k, m in enumerate(report.pack_imports):
+                nimp, top_package = gather_imports(m)
+                report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
+                nimp['report_relative_location'] = report_relative_location
+                nimp['name'] = m.__name__
+                sources[k] = nimp
+                # if len([k for k in nimp if k not in sources]) > 0:
+                print(f"*** {m.__name__}")
+                # sources = {**sources, **nimp}
+    results['sources'] = sources
 
     if output_dir is None:
         output_dir = os.getcwd()
@@ -414,10 +433,13 @@ def gather_upload_to_campusnet(report, output_dir=None):
     with open(token, 'wb') as f:
         pickle.dump(results, f)
 
-    print(" ")
-    print("To get credit for your results, please upload the single file: ")
-    print(">", token)
-    print("To campusnet without any modifications.")
+    if not args.autolab:
+        print(" ")
+        print("To get credit for your results, please upload the single file: ")
+        print(">", token)
+        print("To campusnet without any modifications.")
 
 def source_instantiate(name, report1_source, payload):
     eval("exec")(report1_source, globals())
@@ -428,7 +450,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to `Documents/` and run:\n\n> python -m course_package.report1\n\nSee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception(f"> Error: The pre-computed answer file {os.path.abspath(report.computed_answers_file)} does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors) - len(res.failures) # both errors and failures count against the score\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print("Completed: " + dt_string + " (" + plrl(minutes, "minute") + ", " + plrl(seconds, "second") + ")")\n\n    table_data.append(["Total", str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    import io\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zipf:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zipf.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    # The code below is unreachable (an earlier file-based packaging approach).\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("are created/modified individually by you in agreement with DTU\'s exam rules")\n        report.pack_imports += report.individual_imports\n\n    
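# Zip the full source tree of every top-level package listed in report.pack_imports, and record where the report file sits relative to it (presumably so the report can be re-run when the token is unpacked for grading).\n    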
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
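+# Usage sketch (an assumption based on the helpers above; the exact footer the
+# generator emits is not shown here): the embedded source strings are meant for
+# source_instantiate(), which exec's the module source and rebuilds the Report
+# subclass from a hex-encoded pickled payload defined alongside, e.g.:
+#
+#   report = source_instantiate("Report1", report1_source, payload)
+#   gather_upload_to_campusnet(report, output_dir=os.path.dirname(__file__))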
+report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.makedirs(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\') as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here: a top-level import would break the __version__ tag.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try deleting it; otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._stderr = sys.stderr\n            sys.stderr = StringIO() # memory-hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._stderr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _precomputed_title = None # Optional precomputed title; returned by _safe_get_title when set.\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, 
*args, **kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs(np.asarray(computed).flatten() - np.asarray(expected).flatten())\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol is None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around RNG).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; see the test script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required so printed output is not mixed together.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
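# Describe the crash: print the exception, the expected answer and a full traceback, so the point of failure is visible.\n                    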
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n    nL = 80 # Maximum line width\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _import_base_relative(self):\n        a ="234"\n        b = "234"\n        root_dir = self.pack_imports[0].__path__._path[0]\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        return root_dir, relative_path\n\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s 
= f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        self.show_progress_bar = show_progress_bar\n\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            raise Exception("Stopping a stopped progress bar. ")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    show_progress_bar = True\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        # if hasattr(self, \'cc\'):\n        #     self.cc.terminate()\n        # self.cc_terminate(success=False)\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        self.cc_terminate(success=False)\n        # if self.showAll:\n        #     self.stream.writeln("FAIL")\n        # elif self.dots:\n        #     self.stream.write(\'F\')\n        #     self.stream.flush()\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # 
super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        self.cc_terminate()\n\n\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n            # current = 1\n            # possible = 1\n            # current == possible\n            ss = "PASS" if success else "FAILED"\n            if tsecs >= 0.1:\n                ss += " (" + str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        # show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n\n        # test.countTestCases()\n\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if self.show_progress_bar or True:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. 
So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache is None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 is None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) 
commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception(f"> Error: The pre-computed answer file {os.path.abspath(report.computed_answers_file)} does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook = Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        print(tabulate(table_data))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        testcase_methods = list(testCaseClass.__dict__.keys())\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. 
The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * 
obtained) / possible ))) if possible > 0 else 0\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n      
      resources[v] = ff.read()\n    return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n                                          show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    sources = {}\n\n    if not args.autolab:\n        if len(report.individual_imports) > 0:\n            print("By uploading the .token file, you verify the files:")\n            for m in report.individual_imports:\n                print(">", m.__file__)\n            print("Are created/modified individually by you in agreement with DTUs exam rules")\n            report.pack_imports += report.individual_imports\n\n        if len(report.pack_imports) > 0:\n            print("Including files in upload...")\n            for k, m in enumerate(report.pack_imports):\n                nimp, top_package = gather_imports(m)\n                report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n                nimp[\'report_relative_location\'] = report_relative_location\n                nimp[\'name\'] = m.__name__\n                sources[k] = nimp\n                # if len([k for k in nimp if k not in sources]) > 0:\n                print(f"*** {m.__name__}")\n                # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    if not args.autolab:\n        print(" ")\n        print("To get credit for your results, please upload the single file: ")\n        print(">", token)\n        print("To campusnet without any modifications.")\n\n        # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 
import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
 report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e'
 name="Report1"
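For orientation: the generated grade script above carries everything it needs inline. `report1_source` is the complete framework source as one escaped string, `report1_payload` is the hex-encoded pickled answer cache, and `name` selects the report class. A minimal sketch of how these presumably come together at the end of the generated file (the exact trailer is not part of this hunk; `source_instantiate` and `gather_upload_to_campusnet` are the functions defined in the embedded source):

    # Hypothetical trailer of the generated report1_grade.py (an assumption,
    # based on the definitions shown in the embedded source above).
    report = source_instantiate(name, report1_source, report1_payload)
    gather_upload_to_campusnet(report)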
 
diff --git a/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token b/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token
new file mode 100644
index 0000000000000000000000000000000000000000..f5d1790d62c080547dc0c0940d993d6e89efd745
Binary files /dev/null and b/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token differ
diff --git a/examples/example_simplest/students/cs101/Report1_handin_10_of_10.token b/examples/example_simplest/students/cs101/Report1_handin_10_of_10.token
deleted file mode 100644
index 4e5f827930cba675185c66c49d883c1a2cd35046..0000000000000000000000000000000000000000
Binary files a/examples/example_simplest/students/cs101/Report1_handin_10_of_10.token and /dev/null differ
diff --git a/examples/example_simplest/students/cs101/deploy.py b/examples/example_simplest/students/cs101/deploy.py
deleted file mode 100644
index 3e9682d9aa6d9ffce1501d7826a8bd126779d75c..0000000000000000000000000000000000000000
--- a/examples/example_simplest/students/cs101/deploy.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from report1 import Report1
-from unitgrade_private2.hidden_create_files import setup_grade_file_report
-from snipper import snip_dir
-import shutil
-
-if __name__ == "__main__":
-    setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
-
-    # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
-    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
-
-    # For my own sake, copy the homework to the other examples.
-    for f in ['../../../example_framework/instructor/cs102/homework1.py', '../../../example_docker/instructor/cs103/homework1.py']:
-        shutil.copy('homework1.py', f)
-
-
diff --git a/examples/example_simplest/students/cs101/homework1.py b/examples/example_simplest/students/cs101/homework1.py
index 286b79fbac40c2d02b5874c0a73fc387835ce2b3..3543f1ba46b63eec3a2c2e007ee998660c7136c6 100644
--- a/examples/example_simplest/students/cs101/homework1.py
+++ b/examples/example_simplest/students/cs101/homework1.py
@@ -1,14 +1,19 @@
-def reverse_list(mylist): #!f
+"""
+Example student code. This file is automatically generated from the files in the instructor directory.
+"""
+def reverse_list(mylist): 
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
     reverse_list([1,2,3]) should return [3,2,1] (as a list).
     """
-    return list(reversed(mylist))
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
-def add(a,b): #!f
+def add(a,b): 
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    return a+b
+    # TODO: 1 line missing.
+    raise NotImplementedError("Implement function body")
 
 if __name__ == "__main__":
     # Problem 1: Write a function which add two numbers
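For reference, a student completing the generated handout above would restore bodies equivalent to the instructor lines this hunk removes:

    # Solutions matching the '-' lines in the hunk above.
    def reverse_list(mylist):
        return list(reversed(mylist))

    def add(a, b):
        return a + b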
diff --git a/examples/example_simplest/students/cs101/report1.py b/examples/example_simplest/students/cs101/report1.py
index ea4f3b2c3381a0c8b4ec450570cf5f42fde7a070..a50ddcc74e292918fb3a9c7d86c48fbeb012b32b 100644
--- a/examples/example_simplest/students/cs101/report1.py
+++ b/examples/example_simplest/students/cs101/report1.py
@@ -1,3 +1,6 @@
+"""
+Example student code. This file is automatically generated from the files in the instructor directory.
+"""
 from unitgrade2.unitgrade2 import Report
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
 from cs101.homework1 import reverse_list, add
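report1.py presumably keeps the usual unitgrade entry point below the imports shown here: instantiate the Report subclass and hand it to `evaluate_report_student`. A sketch, assuming the `Report1` class from the embedded source earlier in this diff:

    # Assumed tail of report1.py; not shown in this hunk.
    if __name__ == "__main__":
        evaluate_report_student(Report1())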
diff --git a/examples/example_simplest/students/cs101/report1_grade.py b/examples/example_simplest/students/cs101/report1_grade.py
index d844649a19b8ec65fbfd2a0142364b1e978a59dc..841e8a37a04e5bbfe929af1a743729f271f67cf7 100644
--- a/examples/example_simplest/students/cs101/report1_grade.py
+++ b/examples/example_simplest/students/cs101/report1_grade.py
@@ -1,4 +1,6 @@
-
+"""
+Example student code. This file is automatically generated from the files in the instructor directory.
+"""
 import numpy as np
 from tabulate import tabulate
 from datetime import datetime
@@ -40,8 +42,6 @@ parser.add_argument('--showcomputed',  action="store_true",  help='Show the answ
 parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
 parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
 
-
-
 def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
     args = parser.parse_args()
     if question is None and args.q is not None:
@@ -138,13 +138,24 @@ class UnitgradeTextRunner(unittest.TextTestRunner):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
+class SequentialTestLoader(unittest.TestLoader):
+    def getTestCaseNames(self, testCaseClass):
+        test_names = super().getTestCaseNames(testCaseClass)
+        testcase_methods = list(testCaseClass.__dict__.keys())
+        test_names.sort(key=testcase_methods.index)
+        return test_names
 
 def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
                     show_progress_bar=True,
-                    show_tol_err=False):
+                    show_tol_err=False,
+                    big_header=True):
+
     now = datetime.now()
-    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
-    b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    if big_header:
+        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
+        b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
+    else:
+        b = "Unitgrade"
     print(b + " v" + __version__)
     dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
     print("Started: " + dt_string)
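`SequentialTestLoader` is hoisted to module level here because `unittest.TestLoader` returns test names sorted alphabetically, which would run question items out of their source order; re-sorting by the index in `testCaseClass.__dict__` (insertion-ordered in Python 3.7+) restores definition order. A self-contained check of the difference:

    import unittest

    class SequentialTestLoader(unittest.TestLoader):  # copy of the class added above
        def getTestCaseNames(self, testCaseClass):
            test_names = super().getTestCaseNames(testCaseClass)
            testcase_methods = list(testCaseClass.__dict__.keys())
            test_names.sort(key=testcase_methods.index)
            return test_names

    class Demo(unittest.TestCase):
        def test_b(self): pass
        def test_a(self): pass

    print(unittest.TestLoader().getTestCaseNames(Demo))   # ['test_a', 'test_b'] (alphabetical)
    print(SequentialTestLoader().getTestCaseNames(Demo))  # ['test_b', 'test_a'] (definition order)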
@@ -157,17 +168,7 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
     nL = 80
     t_start = time.time()
     score = {}
-
-    # Use the sequential test loader instead. See here:
-    class SequentialTestLoader(unittest.TestLoader):
-        def getTestCaseNames(self, testCaseClass):
-            test_names = super().getTestCaseNames(testCaseClass)
-            testcase_methods = list(testCaseClass.__dict__.keys())
-            test_names.sort(key=testcase_methods.index)
-            return test_names
     loader = SequentialTestLoader()
-    # loader = unittest.TestLoader()
-    # loader.suiteClass = MySuite
 
     for n, (q, w) in enumerate(report.questions):
         # q = q()
@@ -188,6 +189,8 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         # unittest.Te
         # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
         UTextResult.q_title_print = q_title_print # Hacky
+        UTextResult.show_progress_bar = show_progress_bar # Hacky.
+
         res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
         # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)
         z = 234
@@ -262,14 +265,15 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         # ws, possible, obtained = upack(q_)
 
         possible = res.testsRun
-        obtained = possible - len(res.errors)
+        obtained = len(res.successes)
 
+        assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun
 
         # possible = int(ws @ possible)
         # obtained = int(ws @ obtained)
         # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0
 
-        obtained = w * int(obtained * 1.0 / possible )
+        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
         score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle}
         q.obtained = obtained
         q.possible = possible
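The two scoring changes above work together: `len(res.successes)`, unlike the old `possible - len(res.errors)`, also discounts plain assertion failures, and moving the weight inside `int(...)` stops partial credit from truncating to zero. Worked example with w=10 and 1 of 2 tests passing:

    w, obtained, possible = 10, 1, 2
    old = w * int(obtained * 1.0 / possible)   # 10 * int(0.5) -> 0
    new = int(w * obtained * 1.0 / possible)   # int(5.0)      -> 5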
@@ -368,38 +372,55 @@ def gather_imports(imp):
             resources[v] = ff.read()
     return resources
 
+import argparse
+parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example:
+
+> python report1_grade.py
+
+Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for Python may be useful.
+For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to `Documents/` and run:
+
+> python -m course_package.report1
+
+see https://docs.python.org/3.9/using/cmdline.html
+""", formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars')
+parser.add_argument('--autolab',  action="store_true",  help='Show Autolab results')
 
 def gather_upload_to_campusnet(report, output_dir=None):
-    n = 80
-    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)
+    n = report.nL
+    args = parser.parse_args()
+    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,
+                                          show_progress_bar=not args.noprogress,
+                                          big_header=not args.autolab)
     print(" ")
     print("="*n)
     print("Final evaluation")
     print(tabulate(table_data))
     # also load the source code of missing files...
 
-    if len(report.individual_imports) > 0:
-        print("By uploading the .token file, you verify the files:")
-        for m in report.individual_imports:
-            print(">", m.__file__)
-        print("Are created/modified individually by you in agreement with DTUs exam rules")
-        report.pack_imports += report.individual_imports
-
     sources = {}
-    if len(report.pack_imports) > 0:
-        print("Including files in upload...")
-        for k, m in enumerate(report.pack_imports):
-            nimp, top_package = gather_imports(m)
-            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
-            nimp['report_relative_location'] = report_relative_location
-            nimp['name'] = m.__name__
-            sources[k] = nimp
-            # if len([k for k in nimp if k not in sources]) > 0:
-            print(f"*** {m.__name__}")
-            # sources = {**sources, **nimp}
-    results['sources'] = sources
 
-    # json_str = json.dumps(results, indent=4)
+    if not args.autolab:
+        if len(report.individual_imports) > 0:
+            print("By uploading the .token file, you verify the files:")
+            for m in report.individual_imports:
+                print(">", m.__file__)
+            print("Are created/modified individually by you in agreement with DTU's exam rules")
+            report.pack_imports += report.individual_imports
+
+        if len(report.pack_imports) > 0:
+            print("Including files in upload...")
+            for k, m in enumerate(report.pack_imports):
+                nimp, top_package = gather_imports(m)
+                report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
+                nimp['report_relative_location'] = report_relative_location
+                nimp['name'] = m.__name__
+                sources[k] = nimp
+                # if len([k for k in nimp if k not in sources]) > 0:
+                print(f"*** {m.__name__}")
+                # sources = {**sources, **nimp}
+    results['sources'] = sources
 
     if output_dir is None:
         output_dir = os.getcwd()
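With the two flags added above, the grade script can run in an Autolab-friendly mode: `--autolab` replaces the ASCII banner with a plain header (`big_header=False`) and suppresses the CampusNet upload instructions, while `--noprogress` disables the progress bars:

> python report1_grade.py --autolab --noprogress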
@@ -414,10 +435,13 @@ def gather_upload_to_campusnet(report, output_dir=None):
     with open(token, 'wb') as f:
         pickle.dump(results, f)
 
-    print(" ")
-    print("To get credit for your results, please upload the single file: ")
-    print(">", token)
-    print("To campusnet without any modifications.")
+    if not args.autolab:
+        print(" ")
+        print("To get credit for your results, please upload the single file: ")
+        print(">", token)
+        print("To CampusNet without any modifications.")
+
+        # print("Now time for some autolab fun")
 
 def source_instantiate(name, report1_source, payload):
     eval("exec")(report1_source, globals())
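The token written above is named by the pattern `"%s_%i_of_%i%s.token"` (see `gather_upload_to_campusnet` in the embedded source); with the values from this example repo it reproduces the handin file renamed at the top of this diff:

    payload_out_base = "Report1_handin"    # report class name + "_handin"
    obtain, possible, vstring = 0, 10, ""  # score, max score, no version suffix
    print("%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring))
    # -> Report1_handin_0_of_10.token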
@@ -428,10 +452,10 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
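+# Minimal usage sketch for the embedded payload mechanism (illustration only;
+# `report1_payload` is an assumed name for the hex-encoded pickle that
+# accompanies report1_source below, not a variable visible in this hunk):
+#
+#   report = source_instantiate("Report1", report1_source, report1_payload)
+#   # ...which, per source_instantiate above, is equivalent to:
+#   #   exec(report1_source, globals())
+#   #   pl = pickle.loads(bytes.fromhex(report1_payload))
+#   #   report = Report1(payload=pl, strict=True)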
+report1_source = 'import os\n\n# Don\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, 
*args, **kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n    nL = 80 # Maximum line width\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _import_base_relative(self):\n        a ="234"\n        b = "234"\n        root_dir = self.pack_imports[0].__path__._path[0]\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        return root_dir, relative_path\n\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s 
= f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        self.show_progress_bar = show_progress_bar\n\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            raise Exception("Stopping a stopped progress bar. ")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    show_progress_bar = True\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        # if hasattr(self, \'cc\'):\n        #     self.cc.terminate()\n        # self.cc_terminate(success=False)\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        self.cc_terminate(success=False)\n        # if self.showAll:\n        #     self.stream.writeln("FAIL")\n        # elif self.dots:\n        #     self.stream.write(\'F\')\n        #     self.stream.flush()\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # 
super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        self.cc_terminate()\n\n\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n            # current = 1\n            # possible = 1\n            # current == possible\n            ss = "PASS" if success else "FAILED"\n            if tsecs >= 0.1:\n                ss += " (" + str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        # show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n\n        # test.countTestCases()\n\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if self.show_progress_bar or True:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. 
So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) 
commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook = Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        testcase_methods = list(testCaseClass.__dict__.keys())\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. 
The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * 
obtained) / possible ))) if possible > 0 else 0\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n      
      resources[v] = ff.read()\n    return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n                                          show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    sources = {}\n\n    if not args.autolab:\n        if len(report.individual_imports) > 0:\n            print("By uploading the .token file, you verify the files:")\n            for m in report.individual_imports:\n                print(">", m.__file__)\n            print("Are created/modified individually by you in agreement with DTUs exam rules")\n            report.pack_imports += report.individual_imports\n\n        if len(report.pack_imports) > 0:\n            print("Including files in upload...")\n            for k, m in enumerate(report.pack_imports):\n                nimp, top_package = gather_imports(m)\n                report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n                nimp[\'report_relative_location\'] = report_relative_location\n                nimp[\'name\'] = m.__name__\n                sources[k] = nimp\n                # if len([k for k in nimp if k not in sources]) > 0:\n                print(f"*** {m.__name__}")\n                # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    if not args.autolab:\n        print(" ")\n        print("To get credit for your results, please upload the single file: ")\n        print(">", token)\n        print("To campusnet without any modifications.")\n\n        # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 
import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
 report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e'
 name="Report1"
 
 report = source_instantiate(name, report1_source, report1_payload)
 output_dir = os.path.dirname(__file__)
-gather_upload_to_campusnet(report, output_dir)
\ No newline at end of file
+gather_upload_to_campusnet(report, output_dir)
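An aside on the mechanics of the generated grade script above: report1_payload is a pickled data structure, hex-encoded so it can live inside an ordinary string literal, and source_instantiate reverses this via pickle.loads(bytes.fromhex(payload)). A minimal sketch of that round trip, with a made-up dict standing in for the real payload:

import pickle

# Encoding side (what the generator does): pickle, then hex-encode, so the
# bytes can be embedded as a plain string in the emitted script.
payload = pickle.dumps({"Week1": {}}).hex()

# Decoding side (what source_instantiate does): hex-decode, then unpickle.
restored = pickle.loads(bytes.fromhex(payload))
assert restored == {"Week1": {}}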
diff --git a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc
index 7a413eb7b3090c5d5d4bbccbf251cfec013c941c..5c2e1ae6b68171c81e7ddf38fb9d45d3f1968a79 100644
Binary files a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc and b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc differ
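Also for reference before the next hunk: the .token file written by gather_upload_to_campusnet is nothing more than the pickled results dict, so it can be inspected directly. A minimal sketch, assuming the results layout assembled above; the token filename is hypothetical:

import pickle

with open("Report1_handin_10_of_10.token", "rb") as f:  # hypothetical filename
    results = pickle.load(f)

obtained, possible = results["total"]
print(f"total: {obtained}/{possible}")
for k, src in results["sources"].items():
    # each entry holds the zipped package plus the bookkeeping fields
    # added in gather_upload_to_campusnet
    print(k, src["name"], src["report_relative_location"], len(src["zipfile"]), "zipped bytes")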
diff --git a/unitgrade_private2/hidden_gather_upload.py b/unitgrade_private2/hidden_gather_upload.py
index 3ff0f7d89409b960d608e496cbdc39015586ae9a..0fb11d89ed3559b389842ad21fb7cfb442a93093 100644
--- a/unitgrade_private2/hidden_gather_upload.py
+++ b/unitgrade_private2/hidden_gather_upload.py
@@ -66,38 +66,55 @@ def gather_imports(imp):
             resources[v] = ff.read()
     return resources
 
+import argparse
+parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example:
+
+> python report1_grade.py
+
+Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
+For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/' and run:
+
+> python -m course_package.report1
+
+see https://docs.python.org/3.9/using/cmdline.html
+""", formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars')
+parser.add_argument('--autolab',  action="store_true",  help='Show Autolab results')
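+# Usage sketch (hypothetical invocations; both flags are defined just above):
+#   python report1_grade.py --noprogress   # grade without progress bars
+#   python report1_grade.py --autolab      # condensed output for Autolab (see below)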
 
 def gather_upload_to_campusnet(report, output_dir=None):
-    n = 80
-    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)
+    n = report.nL
+    args = parser.parse_args()
+    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,
+                                          show_progress_bar=not args.noprogress,
+                                          big_header=not args.autolab)
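+    # With --autolab the big pyfiglet banner is skipped here, and the upload
+    # instructions below are suppressed, presumably so Autolab can parse the output.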
     print(" ")
     print("="*n)
     print("Final evaluation")
     print(tabulate(table_data))
     # also load the source code of missing files...
 
-    if len(report.individual_imports) > 0:
-        print("By uploading the .token file, you verify the files:")
-        for m in report.individual_imports:
-            print(">", m.__file__)
-        print("Are created/modified individually by you in agreement with DTUs exam rules")
-        report.pack_imports += report.individual_imports
-
     sources = {}
-    if len(report.pack_imports) > 0:
-        print("Including files in upload...")
-        for k, m in enumerate(report.pack_imports):
-            nimp, top_package = gather_imports(m)
-            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
-            nimp['report_relative_location'] = report_relative_location
-            nimp['name'] = m.__name__
-            sources[k] = nimp
-            # if len([k for k in nimp if k not in sources]) > 0:
-            print(f"*** {m.__name__}")
-            # sources = {**sources, **nimp}
-    results['sources'] = sources
 
-    # json_str = json.dumps(results, indent=4)
+    if not args.autolab:
+        if len(report.individual_imports) > 0:
+            print("By uploading the .token file, you verify the files:")
+            for m in report.individual_imports:
+                print(">", m.__file__)
+            print("Are created/modified individually by you in agreement with DTUs exam rules")
+            report.pack_imports += report.individual_imports
+
+        if len(report.pack_imports) > 0:
+            print("Including files in upload...")
+            for k, m in enumerate(report.pack_imports):
+                nimp, top_package = gather_imports(m)
+                report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)
+                nimp['report_relative_location'] = report_relative_location
+                nimp['name'] = m.__name__
+                sources[k] = nimp
+                print(f"*** {m.__name__}")
+    results['sources'] = sources
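+    # Each sources[k] is the dict produced by gather_imports: the package zipped
+    # in-memory under 'zipfile', plus 'top_package', 'module_import', 'name' and
+    # 'report_relative_location'.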
 
     if output_dir is None:
         output_dir = os.getcwd()
@@ -112,10 +129,13 @@ def gather_upload_to_campusnet(report, output_dir=None):
     with open(token, 'wb') as f:
         pickle.dump(results, f)
 
-    print(" ")
-    print("To get credit for your results, please upload the single file: ")
-    print(">", token)
-    print("To campusnet without any modifications.")
+    if not args.autolab:
+        print(" ")
+        print("To get credit for your results, please upload the single file: ")
+        print(">", token)
+        print("To campusnet without any modifications.")
+
+        # print("Now time for some autolab fun")
 
 def source_instantiate(name, report1_source, payload):
     eval("exec")(report1_source, globals())