From e06673a1162086dc319e57cdaf49a7e1355e96e2 Mon Sep 17 00:00:00 2001
From: Tue Herlau <tuhe@dtu.dk>
Date: Sat, 28 Aug 2021 17:50:18 +0200
Subject: [PATCH] updates

---
 autolab/autolab.py                            | 154 ++++++++++++++++++
 autolab/docker_tango_python/Dockerfile        |  40 +++++
 autolab/docker_tango_python/requirements.txt  |   6 +
 autolab/lab_template/Makefile                 |  47 ++++++
 autolab/lab_template/autograde-Makefile       |   7 +
 autolab/lab_template/autograde.tar            | Bin 0 -> 10240 bytes
 autolab/lab_template/hello.rb                 |  11 ++
 autolab/lab_template/hello.yml                |  24 +++
 autolab/lab_template/hello/Makefile           |  44 +++++
 autolab/lab_template/hello/README             |  32 ++++
 autolab/lab_template/hello/autograde-Makefile |   7 +
 autolab/lab_template/hello/autograde.tar      | Bin 0 -> 10240 bytes
 autolab/lab_template/hello/hello-handout.tar  | Bin 0 -> 10240 bytes
 .../lab_template/hello/hello-handout/Makefile |   8 +
 .../lab_template/hello/hello-handout/README   |  15 ++
 .../hello/hello-handout/driver.sh             |  31 ++++
 .../lab_template/hello/hello-handout/hello.c  |   3 +
 autolab/lab_template/hello/hello.rb           |  11 ++
 autolab/lab_template/hello/hello.yml          |  24 +++
 autolab/lab_template/hello/src/Makefile       |   7 +
 .../lab_template/hello/src/Makefile-handout   |   8 +
 autolab/lab_template/hello/src/README         |  16 ++
 autolab/lab_template/hello/src/README-handout |  15 ++
 autolab/lab_template/hello/src/driver.sh      |  31 ++++
 autolab/lab_template/hello/src/hello.c        |   9 +
 .../lab_template/hello/src/hello.c-handout    |   3 +
 .../hello/test-autograder/Makefile            |   7 +
 .../hello/test-autograder/autograde.tar       | Bin 0 -> 10240 bytes
 .../hello/test-autograder/hello.c             |   9 +
 autolab/lab_template/hello/writeup/README     |   2 +
 autolab/lab_template/hello/writeup/hello.html |  13 ++
 autolab/lab_template/src/Makefile             |   7 +
 autolab/lab_template/src/Makefile-handout     |   8 +
 autolab/lab_template/src/README               |  16 ++
 autolab/lab_template/src/README-handout       |  15 ++
 autolab/lab_template/src/driver.sh            |  34 ++++
 autolab/lab_template/src/driver_python.py     |  84 ++++++++++
 autolab/lab_template/src/hello.c              |   9 +
 autolab/lab_template/src/hello.c-handout      |   3 +
 autolab/report_autolab.py                     |   0
 .../__pycache__/homework1.cpython-39.pyc      | Bin 0 -> 833 bytes
 .../cs103/__pycache__/report3.cpython-39.pyc  | Bin 0 -> 1053 bytes
 .../report3_complete.cpython-39.pyc           | Bin 0 -> 1246 bytes
 .../example_docker/instructor/cs103/deploy.py |   4 +-
 .../cs103/report3_complete_grade.py           |   2 +-
 .../instructor/cs103/report3_grade.py         |   2 +-
 .../instructor/cs103/unitgrade/Week1.pkl      | Bin 96 -> 96 bytes
 .../__pycache__/homework1.cpython-38.pyc      | Bin 922 -> 0 bytes
 .../unitgrade-docker/tmp/cs103/deploy.py      |  52 ++++++
 .../unitgrade-docker/tmp/cs103/homework1.py   |  13 +-
 .../unitgrade-docker/tmp/cs103/report3.py     |   5 +-
 .../tmp/cs103/report3_complete.py             |  25 +++
 .../tmp/cs103/report3_complete_grade.py       |   2 +-
 .../tmp/cs103/report3_grade.py                |   8 +-
 .../cs103/Report3_handin_20_of_20.token}      | Bin 138232 -> 140621 bytes
 .../__pycache__/homework1.cpython-38.pyc      | Bin 992 -> 833 bytes
 .../__pycache__/homework1.cpython-39.pyc      | Bin 0 -> 833 bytes
 .../cs103/__pycache__/report3.cpython-39.pyc  | Bin 0 -> 1053 bytes
 .../report3_complete.cpython-39.pyc           | Bin 0 -> 1246 bytes
 .../__pycache__/report3_grade.cpython-38.pyc  | Bin 57934 -> 0 bytes
 .../__pycache__/report3_grade.cpython-39.pyc} | Bin 57919 -> 57907 bytes
 .../example_docker/students/cs103/deploy.py   |  52 ++++++
 .../students/cs103/homework1.py               |  13 +-
 .../example_docker/students/cs103/report3.py  |   5 +-
 .../students/cs103/report3_complete.py        |  25 +++
 ..._of_20.token => report3_complete_grade.py} | Bin 70152 -> 67567 bytes
 .../students/cs103/report3_grade.py           |   8 +-
 .../students/cs103/unitgrade/Week1.pkl        | Bin 96 -> 96 bytes
 .../__pycache__/homework1.cpython-39.pyc      | Bin 0 -> 835 bytes
 .../cs101/__pycache__/report1.cpython-39.pyc  | Bin 0 -> 1221 bytes
 .../instructor/cs101/report1.py               |   2 +-
 .../instructor/cs101/report1_grade.py         |   2 +-
 .../cs101/Report1_handin_10_of_10.token       | Bin 0 -> 70101 bytes
 .../__pycache__/homework1.cpython-39.pyc      | Bin 0 -> 835 bytes
 .../cs101/__pycache__/report1.cpython-39.pyc  | Bin 0 -> 1221 bytes
 .../example_simplest/students/cs101/deploy.py |  16 ++
 .../students/cs101/homework1.py               |  13 +-
 .../students/cs101/report1.py                 |   5 +-
 .../students/cs101/report1_grade.py           |   8 +-
 .../__pycache__/__init__.cpython-39.pyc       | Bin 0 -> 944 bytes
 .../__pycache__/deployment.cpython-39.pyc     | Bin 0 -> 1425 bytes
 .../__pycache__/docker_helpers.cpython-39.pyc | Bin 0 -> 3217 bytes
 .../hidden_create_files.cpython-39.pyc        | Bin 0 -> 4665 bytes
 .../hidden_gather_upload.cpython-39.pyc       | Bin 0 -> 3274 bytes
 unitgrade_private2/docker_helpers.py          |  84 +++++++++-
 85 files changed, 1043 insertions(+), 63 deletions(-)
 create mode 100644 autolab/autolab.py
 create mode 100644 autolab/docker_tango_python/Dockerfile
 create mode 100644 autolab/docker_tango_python/requirements.txt
 create mode 100644 autolab/lab_template/Makefile
 create mode 100644 autolab/lab_template/autograde-Makefile
 create mode 100644 autolab/lab_template/autograde.tar
 create mode 100644 autolab/lab_template/hello.rb
 create mode 100644 autolab/lab_template/hello.yml
 create mode 100644 autolab/lab_template/hello/Makefile
 create mode 100644 autolab/lab_template/hello/README
 create mode 100644 autolab/lab_template/hello/autograde-Makefile
 create mode 100644 autolab/lab_template/hello/autograde.tar
 create mode 100644 autolab/lab_template/hello/hello-handout.tar
 create mode 100644 autolab/lab_template/hello/hello-handout/Makefile
 create mode 100644 autolab/lab_template/hello/hello-handout/README
 create mode 100755 autolab/lab_template/hello/hello-handout/driver.sh
 create mode 100644 autolab/lab_template/hello/hello-handout/hello.c
 create mode 100644 autolab/lab_template/hello/hello.rb
 create mode 100644 autolab/lab_template/hello/hello.yml
 create mode 100644 autolab/lab_template/hello/src/Makefile
 create mode 100644 autolab/lab_template/hello/src/Makefile-handout
 create mode 100644 autolab/lab_template/hello/src/README
 create mode 100644 autolab/lab_template/hello/src/README-handout
 create mode 100755 autolab/lab_template/hello/src/driver.sh
 create mode 100644 autolab/lab_template/hello/src/hello.c
 create mode 100644 autolab/lab_template/hello/src/hello.c-handout
 create mode 100644 autolab/lab_template/hello/test-autograder/Makefile
 create mode 100644 autolab/lab_template/hello/test-autograder/autograde.tar
 create mode 100644 autolab/lab_template/hello/test-autograder/hello.c
 create mode 100644 autolab/lab_template/hello/writeup/README
 create mode 100644 autolab/lab_template/hello/writeup/hello.html
 create mode 100644 autolab/lab_template/src/Makefile
 create mode 100644 autolab/lab_template/src/Makefile-handout
 create mode 100644 autolab/lab_template/src/README
 create mode 100644 autolab/lab_template/src/README-handout
 create mode 100755 autolab/lab_template/src/driver.sh
 create mode 100644 autolab/lab_template/src/driver_python.py
 create mode 100644 autolab/lab_template/src/hello.c
 create mode 100644 autolab/lab_template/src/hello.c-handout
 create mode 100644 autolab/report_autolab.py
 create mode 100644 examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-39.pyc
 create mode 100644 examples/example_docker/instructor/cs103/__pycache__/report3.cpython-39.pyc
 create mode 100644 examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-39.pyc
 delete mode 100644 examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc
 create mode 100644 examples/example_docker/instructor/unitgrade-docker/tmp/cs103/deploy.py
 create mode 100644 examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete.py
 rename examples/example_docker/{instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token => students/cs103/Report3_handin_20_of_20.token} (79%)
 create mode 100644 examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc
 create mode 100644 examples/example_docker/students/cs103/__pycache__/report3.cpython-39.pyc
 create mode 100644 examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc
 delete mode 100644 examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc
 rename examples/example_docker/{instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc => students/cs103/__pycache__/report3_grade.cpython-39.pyc} (90%)
 create mode 100644 examples/example_docker/students/cs103/deploy.py
 create mode 100644 examples/example_docker/students/cs103/report3_complete.py
 rename examples/example_docker/students/cs103/{Report3_handin_0_of_20.token => report3_complete_grade.py} (78%)
 create mode 100644 examples/example_simplest/instructor/cs101/__pycache__/homework1.cpython-39.pyc
 create mode 100644 examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-39.pyc
 create mode 100644 examples/example_simplest/students/cs101/Report1_handin_10_of_10.token
 create mode 100644 examples/example_simplest/students/cs101/__pycache__/homework1.cpython-39.pyc
 create mode 100644 examples/example_simplest/students/cs101/__pycache__/report1.cpython-39.pyc
 create mode 100644 examples/example_simplest/students/cs101/deploy.py
 create mode 100644 unitgrade_private2/__pycache__/__init__.cpython-39.pyc
 create mode 100644 unitgrade_private2/__pycache__/deployment.cpython-39.pyc
 create mode 100644 unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc
 create mode 100644 unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc
 create mode 100644 unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc

diff --git a/autolab/autolab.py b/autolab/autolab.py
new file mode 100644
index 0000000..50586f4
--- /dev/null
+++ b/autolab/autolab.py
@@ -0,0 +1,154 @@
+"""
+cd ~/Autolab && bundle exec rails s -p 8000 --binding=0.0.0.0
+
+To remove the custom Tango image:
+docker rmi tango_python_tue
+"""
+from zipfile import ZipFile
+import os
+from os.path import basename
+import shutil
+from jinja2 import Environment, FileSystemLoader
+import glob
+
+COURSES_BASE = "/home/tuhe/Autolab/courses/AutoPopulated"
+TEMPLATE_BASE = "/home/tuhe/Documents/unitgrade_private/autolab/lab_template"
+CURDIR = os.path.dirname(__file__)
+
+def jj(source, dest, data):
+    # Render the Jinja2 template at 'source' with 'data' and write the result to 'dest'.
+    if os.path.exists(dest) and os.path.samefile(source, dest):
+        raise Exception("jj: source and destination are the same file: " + source)
+    dir, f = os.path.split(source)
+    file_loader = FileSystemLoader(dir)
+    env = Environment(loader=file_loader)
+    output = env.get_template(f).render(data)
+    with open(dest, 'w') as f:
+        f.write(output)
+    return output
+
+
+def docker_build_image():
+    # Build the Tango-compatible autograding image used by this lab.
+    os.system("cd docker_tango_python && docker build --tag tango_python_tue .")
+
+def jj_handout(source, dest, data):
+    # Render the template and keep an identical '-handout' copy next to it.
+    out = jj(source, dest, data)
+    shutil.copy(dest, dest+"-handout")
+    return out
+
+
+def zipFilesInDir(dirName, zipFileName, filter):
+    # Create a ZipFile object.
+    with ZipFile(zipFileName, 'w') as zipObj:
+        # Iterate over all the files in the directory tree.
+        for folderName, subfolders, filenames in os.walk(dirName):
+            for filename in filenames:
+                if filter(filename):
+                    # Build the complete path of the file and add it to the zip.
+                    filePath = os.path.join(folderName, filename)
+                    zipObj.write(filePath, basename(filePath))
+
+def deploy_assignment(base_name):
+    docker_build_image()
+
+    # Destination directory for the generated lab inside the Autolab courses tree.
+    LAB_DEST = os.path.join(COURSES_BASE, base_name)
+    STUDENT_HANDOUT_DIR = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101"
+    INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1_grade.py"
+    # STUDENT_TOKEN_FILE  = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101"
+
+    from cs101.report1 import Report1  # The instructor's report class.
+    # Resolve where the grade script lives relative to the package root; this
+    # relative path is templated into driver_python.py below.
+    root, relative = Report1()._import_base_relative()
+
+    # Quickly make student .token file to upload:
+    # os.system(f"cd {os.path.dirname(STUDENT_HANDOUT_DIR)} && python -m cs101.{os.path.basename(INSTRUCTOR_GRADE_FILE)[:-3]}")
+    os.system(f"cd {STUDENT_HANDOUT_DIR} && python {os.path.basename(INSTRUCTOR_GRADE_FILE)}")
+
+    STUDENT_TOKEN_FILE = glob.glob(STUDENT_HANDOUT_DIR + "/*.token")[0]
+
+    # Strip the score suffix from the token filename so the handin name is stable,
+    # e.g. Report1_handin_10_of_10.token -> Report1_handin.token.
+    tname = os.path.basename(STUDENT_TOKEN_FILE)
+    for _ in range(3):
+        tname = tname[:tname.rfind("_")]
+    tname += ".token"
+    print("> Name of handin file", tname)
+
+    # Build the directory that is handed out: unpack the student handout,
+    # then unpack the student token file and merge the two sets of files.
+    if os.path.exists(LAB_DEST):
+        shutil.rmtree(LAB_DEST)
+    os.mkdir(LAB_DEST)
+    assert os.path.exists(TEMPLATE_BASE)
+
+    # Make the handout directory. Start in the src directory; the handout
+    # files must be generated first.
+    src_dest = LAB_DEST + "/src"
+    # src_source = TEMPLATE_BASE + "/src"
+    os.mkdir(src_dest)
+
+    # Helpers shipped with unitgrade for running token files inside containers.
+    from unitgrade_private2 import docker_helpers
+
+    data = {
+            'base_name': base_name,
+            'nice_name': base_name + "please",  # Display-name placeholder.
+            # 'autograde_image': 'autograde_image',
+            'autograde_image': 'tango_python_tue',
+            'src_files_to_handout': ['driver_python.py', 'student_sources.zip', tname, os.path.basename(docker_helpers.__file__),
+                                     os.path.basename(INSTRUCTOR_GRADE_FILE)], # tname is the uploaded handin; drop it from the handout later.
+            'handin_filename': 'hello3.c', # The filename Autolab expects students to hand in.
+            'student_token_file': tname,
+            'instructor_grade_file': os.path.basename(INSTRUCTOR_GRADE_FILE),
+            'grade_file_relative_destination': relative,
+            }
+
+    jj_handout(TEMPLATE_BASE + "/src/README", LAB_DEST + "/src/README", data)
+    jj_handout(TEMPLATE_BASE + "/src/driver_python.py", LAB_DEST + "/src/driver_python.py", data)
+    jj_handout(TEMPLATE_BASE + "/src/hello.c", LAB_DEST + "/src/hello3.c",data)
+    jj_handout(TEMPLATE_BASE + "/src/Makefile", LAB_DEST + "/src/Makefile",data)
+    jj_handout(TEMPLATE_BASE + "/src/driver.sh", LAB_DEST + "/src/driver.sh",data)
+
+    jj(TEMPLATE_BASE + "/Makefile", LAB_DEST + "/Makefile", data)
+    shutil.copyfile(TEMPLATE_BASE + "/autograde-Makefile", LAB_DEST + "/autograde-Makefile")
+    jj(TEMPLATE_BASE + "/hello.yml", f"{LAB_DEST}/{base_name}.yml", data=data)
+    jj(TEMPLATE_BASE + "/hello.rb", f"{LAB_DEST}/{base_name}.rb", data=data)
+
+    # Copy the instructor grade script and the student token file into src/.
+    shutil.copyfile(INSTRUCTOR_GRADE_FILE, f"{LAB_DEST}/src/{os.path.basename(INSTRUCTOR_GRADE_FILE)}")
+    shutil.copyfile(STUDENT_TOKEN_FILE, f"{LAB_DEST}/src/{os.path.basename(STUDENT_TOKEN_FILE)}")
+    shutil.copyfile(STUDENT_TOKEN_FILE, f"{LAB_DEST}/src/{tname}")  # Also under the stable handin name.
+    # zipFilesInDir(STUDENT_HANDOUT_DIR, LAB_DEST + '/student_sources.zip', lambda name: True)
+    # Make a zip file of the students' (handed-out) sources.
+    shutil.make_archive(LAB_DEST + '/src/student_sources', 'zip', root_dir=os.path.dirname(STUDENT_HANDOUT_DIR), base_dir='cs101')
+    # The (student) .token file will later be unpacked and merged into these sources.
+
+    shutil.copyfile(docker_helpers.__file__, f"{LAB_DEST}/src/{os.path.basename(docker_helpers.__file__)}")
+
+
+    os.mkdir(LAB_DEST + "/handin")
+    os.mkdir(LAB_DEST + "/test-autograder")  # Otherwise 'make clean' will fail.
+
+    os.system(f"cd {LAB_DEST} && make && cd {CURDIR}")
+
+    print("Deploy", base_name)
+
+if __name__ == "__main__":
+    # print("Hello there handsome")
+    print("Deploying to", COURSES_BASE)
+    deploy_assignment("hello3")
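
For context, the jj() templating step above reduces to this minimal, standalone sketch; the template directory matches the paths used by deploy_assignment(), while the data values are illustrative only:

from jinja2 import Environment, FileSystemLoader

# Render one template file with a data dict, as jj() does.
env = Environment(loader=FileSystemLoader("lab_template/src"))
output = env.get_template("driver_python.py").render({
    "student_token_file": "Report1_handin.token",            # illustrative
    "instructor_grade_file": "report1_grade.py",              # illustrative
    "grade_file_relative_destination": "cs101/report1_grade.py",  # illustrative
})
with open("driver_python.py", "w") as f:
    f.write(output)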
diff --git a/autolab/docker_tango_python/Dockerfile b/autolab/docker_tango_python/Dockerfile
new file mode 100644
index 0000000..47ede9f
--- /dev/null
+++ b/autolab/docker_tango_python/Dockerfile
@@ -0,0 +1,40 @@
+# syntax=docker/dockerfile:1
+
+FROM python:3.8-slim-buster
+LABEL maintainer="Autolab Team <autolab-dev@andrew.cmu.edu>"
+
+RUN apt-get update && apt-get install -y \
+  build-essential \
+  gcc \
+  git \
+  make \
+  sudo \
+  python \
+  procps \
+  && rm -rf /var/lib/apt/lists/*
+
+# Install autodriver
+WORKDIR /home
+RUN useradd autolab
+RUN useradd autograde
+RUN mkdir autolab autograde output
+RUN chown autolab:autolab autolab
+RUN chown autolab:autolab output
+RUN chown autograde:autograde autograde
+RUN git clone --depth 1 https://github.com/autolab/Tango.git
+WORKDIR Tango/autodriver
+RUN make clean && make
+RUN cp autodriver /usr/bin/autodriver
+RUN chmod +s /usr/bin/autodriver
+# Install the Python dependencies used by the graded labs.
+
+COPY requirements.txt requirements.txt
+RUN pip3 install -r requirements.txt
+
+# Clean up
+WORKDIR /home
+RUN apt-get remove -y git && apt-get -y autoremove && rm -rf Tango/
+
+# Check installation
+RUN ls -l /home
+RUN which autodriver
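
The image only works with Tango if the setuid autodriver binary ends up on PATH. A quick smoke test, as a sketch (the tag is the one built by docker_build_image() in autolab.py; the build-context path is illustrative):

import subprocess

# Build the image, then verify autodriver is on PATH inside the container.
subprocess.run(["docker", "build", "--tag", "tango_python_tue", "docker_tango_python"], check=True)
out = subprocess.run(["docker", "run", "--rm", "tango_python_tue", "which", "autodriver"],
                     capture_output=True, text=True, check=True)
print(out.stdout.strip())  # expected: /usr/bin/autodriver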
diff --git a/autolab/docker_tango_python/requirements.txt b/autolab/docker_tango_python/requirements.txt
new file mode 100644
index 0000000..9db6120
--- /dev/null
+++ b/autolab/docker_tango_python/requirements.txt
@@ -0,0 +1,6 @@
+numpy
+tqdm
+jinja2
+tabulate
+compress_pickle
+pyfiglet
diff --git a/autolab/lab_template/Makefile b/autolab/lab_template/Makefile
new file mode 100644
index 0000000..4178c87
--- /dev/null
+++ b/autolab/lab_template/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile to manage the example Hello Lab
+#
+
+# Get the name of the lab directory
+LAB = $(notdir $(PWD))
+
+all: handout handout-tarfile
+
+handout: 
+	# Rebuild the handout directory that students download
+	(rm -rf $(LAB)-handout; mkdir $(LAB)-handout)
+	cp -p src/Makefile-handout $(LAB)-handout/Makefile
+	cp -p src/README-handout $(LAB)-handout/README
+	cp -p src/hello3.c-handout $(LAB)-handout/hello3.c
+	cp -p src/driver.sh $(LAB)-handout
+{%- for f in src_files_to_handout %}
+	cp -p src/{{f}} $(LAB)-handout
+{% endfor %}
+
+handout-tarfile: handout
+	# Build *-handout.tar and autograde.tar
+	tar cvf $(LAB)-handout.tar $(LAB)-handout
+	cp -p $(LAB)-handout.tar autograde.tar
+
+clean:
+	# Clean the entire lab directory tree.  Note that you can run
+	# "make clean; make" at any time while the lab is live with no
+	# adverse effects.
+	rm -f *~ *.tar
+	(cd src; make clean)
+	(cd test-autograder; make clean)
+	rm -rf $(LAB)-handout
+	rm -f autograde.tar
+#
+# CAREFUL!!! This will delete all student records in the logfile and
+# in the handin directory. Don't run this once the lab has started.
+# Use it to clean the directory when you are starting a new version
+# of the lab from scratch, or when you are debugging the lab prior
+# to releasing it to the students.
+#
+cleanallfiles:
+	# Reset the lab from scratch.
+	make clean
+	rm -f log.txt
+	rm -rf handin/*
+
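
Note that this Makefile is itself a Jinja2 template: the {%- for f in src_files_to_handout %} block expands into one cp line per extra handout file when autolab.py renders it. A minimal sketch of that expansion (the file list mirrors data['src_files_to_handout'] built in autolab.py):

from jinja2 import Environment

tmpl = Environment().from_string(
    "{%- for f in src_files_to_handout %}\n"
    "\tcp -p src/{{f}} $(LAB)-handout\n"
    "{% endfor %}")
print(tmpl.render(src_files_to_handout=["driver_python.py", "student_sources.zip"]))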
diff --git a/autolab/lab_template/autograde-Makefile b/autolab/lab_template/autograde-Makefile
new file mode 100644
index 0000000..8843390
--- /dev/null
+++ b/autolab/lab_template/autograde-Makefile
@@ -0,0 +1,7 @@
+all:
+	tar xvf autograde.tar
+	cp hello3.c hello3-handout
+	(cd hello3-handout; sh driver.sh)
+
+clean:
+	rm -rf *~ hello3-handout
diff --git a/autolab/lab_template/autograde.tar b/autolab/lab_template/autograde.tar
new file mode 100644
index 0000000000000000000000000000000000000000..978f9540c094621f867623c1448d6ee704fdd40d
GIT binary patch
literal 10240
zcmc~u&B@8v%}C5k$uBL@XP^l%FfcGMH#cW6FfcGQH#I<}K~f;hU}$V=YGPz$Vq$K<
zU|?u$WMa;sU`i9$pcz_PT#{I%pukX3Qk0#UgC>c@B_xlebSN=V<KH(iJ2fpcCzZNB
z07bBwnHh4t!_omr%D~Xr1Qh=!2Bzi)#ztlg28Jf)1||#&2Gn&1cGE_9{3|O2mz1WY
z=9MVqLgQW`Ex$;iBqLS915`99_#`HACFbN<DR6P7Cnqa_(~DlRf^I&9$;FkNlbV=k
z#l=~atDswyrl3^^li<Q0;G?30DFndr2`~SHTpe9}U8(IpP$tKh|IH1|(euBdvC(M$
zr*=GGGtUj2*)xk3auSnt6e{ye6^b+ROLI~b%8N2fQWX*vN;2~*6`U0cit^Ko5_5GF
zk`r@sQd1O^zzIh$SxJYhBqOmzp`a);ucR0%tfNq#UzC%gq)?KtP+S6PStumtrGN^R
zJcZ1>l+5JBlGI{_;?m^g)Z$`=q)M*Tip-MCymW=~%#sX+M1|s##FElth5R&ys??%<
zJua@0e1($K;u4Viia=&(TPbkmWagDt*nx{J1#lT;4Q5*_=;?#q2G&(nng_C0p*S%&
zRUxso1mua7R8aYrSfb!qT9TiWn50minUkYXTAZp-o{^ddvO}S`G$}W;#0qAIo_<PE
zW?5>HUU3E&ms@5|YOxg;B&#UEK}besu>z>><bqYAFe&H!+yamuNV%w&%mp_LssiQ?
zg!{O_KF~{sDo{{x%`GSaMFYqJ1#p0aq6+Nr)RfE;@{3xqMezI&cNw;%Lbezv|Kn-@
z8Jb|^e^Wz)(fm(#c$1=0Sy4YJGfzJ$5tOu*72pX#L02IpBUPa|Gq)fIl=Bd2Q=vRn
zAsJjmlw@S)Wh>;T=_z=YC?sd3CTACODJv*I(q}QGSy8NzoR|kMOyK1RL`^a@XXwC-
z2eAIsiqzy%PzeDk4)PR0#REp60J15xI59odN&zK*Dl0%U95~}A=clA{r6y<OD=0yP
zLB$L>(d#8EacO`WJ;<epCKsgau~o6>%1l#;R#2&i2-GU*=A|kaD8yQWjLqW$<xGgN
zZi$&WrA4V$3ZZ$4Nja&YG7su6ScX^7C`v6UEy{y=!4_evrV^4#)iFxN$@xX8#W6}&
z3e_=6&iO?}smUdIsl{LsgW6iC>Wa(~uCz=pE@g!vX!(ZZ&7jh}Jdh`$#h?-wl!3=*
zsfAd577Qt%LE#GyF@>Viykck>jS(pZP`81Dnt)FX4Zwa$%_&Yr@;*Eup$=9^OU%qE
zR>&_>fCOK$LSB9zs6s$b5AZl5!3kgofWi=zwz$YIKS{}JgHbIwV&UaKn5UOaWru>w
ze|+sf10yr^_MfSt@o4*x%F%$;IDIVzE(I+_Uk}97AGz59YZ#3Rj)uT!2#kinXb6mk
rz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinPz(V8s2Vf4

literal 0
HcmV?d00001

diff --git a/autolab/lab_template/hello.rb b/autolab/lab_template/hello.rb
new file mode 100644
index 0000000..a28c026
--- /dev/null
+++ b/autolab/lab_template/hello.rb
@@ -0,0 +1,11 @@
+require "AssessmentBase.rb"
+
+module Hello3
+  include AssessmentBase
+
+  def assessmentInitialize(course)
+    super("hello3",course)
+    @problems = []
+  end
+
+end
diff --git a/autolab/lab_template/hello.yml b/autolab/lab_template/hello.yml
new file mode 100644
index 0000000..b545df5
--- /dev/null
+++ b/autolab/lab_template/hello.yml
@@ -0,0 +1,24 @@
+---
+general:
+  name: {{ base_name }}
+  description: ''
+  display_name: Hello3
+  handin_filename: hello3.c
+  handin_directory: handin
+  max_grace_days: 0
+  handout: hello3-handout.tar
+  writeup: writeup/hello3.html
+  max_submissions: -1
+  disable_handins: false
+  max_size: 2
+  has_svn: false
+  category_name: Lab
+problems:
+- name: Correctness
+  description: ''
+  max_score: 100.0
+  optional: false
+autograder:
+  autograde_timeout: 180
+  autograde_image: {{ autograde_image }}
+  release_score: true
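
After jj() fills in {{ base_name }} and {{ autograde_image }}, the result is ordinary YAML that Autolab imports. A sketch of sanity-checking the rendered file (assumes PyYAML, which is not in this repo's requirements; the file name hello3.yml follows from base_name='hello3'):

import yaml  # PyYAML; an assumption, not a project dependency.

with open("hello3.yml") as f:
    cfg = yaml.safe_load(f)
print(cfg["general"]["name"])                # 'hello3'
print(cfg["autograder"]["autograde_image"])  # 'tango_python_tue'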
diff --git a/autolab/lab_template/hello/Makefile b/autolab/lab_template/hello/Makefile
new file mode 100644
index 0000000..b49f048
--- /dev/null
+++ b/autolab/lab_template/hello/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile to manage the example Hello Lab
+#
+
+# Get the name of the lab directory
+LAB = $(notdir $(PWD))
+
+all: handout handout-tarfile
+
+handout: 
+	# Rebuild the handout directory that students download
+	(rm -rf $(LAB)-handout; mkdir $(LAB)-handout)
+	cp -p src/Makefile-handout $(LAB)-handout/Makefile
+	cp -p src/README-handout $(LAB)-handout/README
+	cp -p src/hello.c-handout $(LAB)-handout/hello.c 
+	cp -p src/driver.sh $(LAB)-handout
+
+handout-tarfile: handout
+	# Build *-handout.tar and autograde.tar
+	tar cvf $(LAB)-handout.tar $(LAB)-handout
+	cp -p $(LAB)-handout.tar autograde.tar
+
+clean:
+	# Clean the entire lab directory tree.  Note that you can run
+	# "make clean; make" at any time while the lab is live with no
+	# adverse effects.
+	rm -f *~ *.tar
+	(cd src; make clean)
+	(cd test-autograder; make clean)
+	rm -rf $(LAB)-handout
+	rm -f autograde.tar
+#
+# CAREFUL!!! This will delete all student records in the logfile and
+# in the handin directory. Don't run this once the lab has started.
+# Use it to clean the directory when you are starting a new version
+# of the lab from scratch, or when you are debugging the lab prior
+# to releasing it to the students.
+#
+cleanallfiles:
+	# Reset the lab from scratch.
+	make clean
+	rm -f log.txt
+	rm -rf handin/*
+
diff --git a/autolab/lab_template/hello/README b/autolab/lab_template/hello/README
new file mode 100644
index 0000000..9b6008c
--- /dev/null
+++ b/autolab/lab_template/hello/README
@@ -0,0 +1,32 @@
+This is an example of the simplest possible autograded lab, called "hello." It uses
+the recommended file layout that we have found helpful in the past.
+
+To build the lab:
+linux> make clean
+linux> make 
+
+To test offline:
+linux> cd test-autograder
+linux> make clean
+linux> make
+
+# Basic files created by the lab author
+Makefile                Builds the lab from src/
+README                  This file
+autograde-Makefile      Makefile that runs the autograder 
+src/                    Contains all src files and solutions         
+test-autograder/        For testing autograder offline
+writeup/                Lab writeup that students view from Autolab    
+
+# Files created by running make
+hello-handout/          The directory that is handed out to students, created
+                        using files from src/. 
+hello-handout.tar       Archive of hello-handout directory
+autograde.tar           File that is copied to the autograding instance 
+                        (along with autograde-Makefile and student handin file)
+
+# Files created and managed by Autolab
+handin/    All students' handin files
+hello.rb   Config file
+hello.yml  Database properties that persist from semester to semester
+log.txt    Log of autograded submissions
diff --git a/autolab/lab_template/hello/autograde-Makefile b/autolab/lab_template/hello/autograde-Makefile
new file mode 100644
index 0000000..3b7ebba
--- /dev/null
+++ b/autolab/lab_template/hello/autograde-Makefile
@@ -0,0 +1,7 @@
+all:
+	tar xvf autograde.tar
+	cp hello.c hello-handout
+	(cd hello-handout; ./driver.sh)
+
+clean:
+	rm -rf *~ hello-handout
diff --git a/autolab/lab_template/hello/autograde.tar b/autolab/lab_template/hello/autograde.tar
new file mode 100644
index 0000000000000000000000000000000000000000..978f9540c094621f867623c1448d6ee704fdd40d
GIT binary patch
literal 10240
zcmc~u&B@8v%}C5k$uBL@XP^l%FfcGMH#cW6FfcGQH#I<}K~f;hU}$V=YGPz$Vq$K<
zU|?u$WMa;sU`i9$pcz_PT#{I%pukX3Qk0#UgC>c@B_xlebSN=V<KH(iJ2fpcCzZNB
z07bBwnHh4t!_omr%D~Xr1Qh=!2Bzi)#ztlg28Jf)1||#&2Gn&1cGE_9{3|O2mz1WY
z=9MVqLgQW`Ex$;iBqLS915`99_#`HACFbN<DR6P7Cnqa_(~DlRf^I&9$;FkNlbV=k
z#l=~atDswyrl3^^li<Q0;G?30DFndr2`~SHTpe9}U8(IpP$tKh|IH1|(euBdvC(M$
zr*=GGGtUj2*)xk3auSnt6e{ye6^b+ROLI~b%8N2fQWX*vN;2~*6`U0cit^Ko5_5GF
zk`r@sQd1O^zzIh$SxJYhBqOmzp`a);ucR0%tfNq#UzC%gq)?KtP+S6PStumtrGN^R
zJcZ1>l+5JBlGI{_;?m^g)Z$`=q)M*Tip-MCymW=~%#sX+M1|s##FElth5R&ys??%<
zJua@0e1($K;u4Viia=&(TPbkmWagDt*nx{J1#lT;4Q5*_=;?#q2G&(nng_C0p*S%&
zRUxso1mua7R8aYrSfb!qT9TiWn50minUkYXTAZp-o{^ddvO}S`G$}W;#0qAIo_<PE
zW?5>HUU3E&ms@5|YOxg;B&#UEK}besu>z>><bqYAFe&H!+yamuNV%w&%mp_LssiQ?
zg!{O_KF~{sDo{{x%`GSaMFYqJ1#p0aq6+Nr)RfE;@{3xqMezI&cNw;%Lbezv|Kn-@
z8Jb|^e^Wz)(fm(#c$1=0Sy4YJGfzJ$5tOu*72pX#L02IpBUPa|Gq)fIl=Bd2Q=vRn
zAsJjmlw@S)Wh>;T=_z=YC?sd3CTACODJv*I(q}QGSy8NzoR|kMOyK1RL`^a@XXwC-
z2eAIsiqzy%PzeDk4)PR0#REp60J15xI59odN&zK*Dl0%U95~}A=clA{r6y<OD=0yP
zLB$L>(d#8EacO`WJ;<epCKsgau~o6>%1l#;R#2&i2-GU*=A|kaD8yQWjLqW$<xGgN
zZi$&WrA4V$3ZZ$4Nja&YG7su6ScX^7C`v6UEy{y=!4_evrV^4#)iFxN$@xX8#W6}&
z3e_=6&iO?}smUdIsl{LsgW6iC>Wa(~uCz=pE@g!vX!(ZZ&7jh}Jdh`$#h?-wl!3=*
zsfAd577Qt%LE#GyF@>Viykck>jS(pZP`81Dnt)FX4Zwa$%_&Yr@;*Eup$=9^OU%qE
zR>&_>fCOK$LSB9zs6s$b5AZl5!3kgofWi=zwz$YIKS{}JgHbIwV&UaKn5UOaWru>w
ze|+sf10yr^_MfSt@o4*x%F%$;IDIVzE(I+_Uk}97AGz59YZ#3Rj)uT!2#kinXb6mk
rz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinPz(V8s2Vf4

literal 0
HcmV?d00001

diff --git a/autolab/lab_template/hello/hello-handout.tar b/autolab/lab_template/hello/hello-handout.tar
new file mode 100644
index 0000000000000000000000000000000000000000..978f9540c094621f867623c1448d6ee704fdd40d
GIT binary patch
literal 10240
zcmc~u&B@8v%}C5k$uBL@XP^l%FfcGMH#cW6FfcGQH#I<}K~f;hU}$V=YGPz$Vq$K<
zU|?u$WMa;sU`i9$pcz_PT#{I%pukX3Qk0#UgC>c@B_xlebSN=V<KH(iJ2fpcCzZNB
z07bBwnHh4t!_omr%D~Xr1Qh=!2Bzi)#ztlg28Jf)1||#&2Gn&1cGE_9{3|O2mz1WY
z=9MVqLgQW`Ex$;iBqLS915`99_#`HACFbN<DR6P7Cnqa_(~DlRf^I&9$;FkNlbV=k
z#l=~atDswyrl3^^li<Q0;G?30DFndr2`~SHTpe9}U8(IpP$tKh|IH1|(euBdvC(M$
zr*=GGGtUj2*)xk3auSnt6e{ye6^b+ROLI~b%8N2fQWX*vN;2~*6`U0cit^Ko5_5GF
zk`r@sQd1O^zzIh$SxJYhBqOmzp`a);ucR0%tfNq#UzC%gq)?KtP+S6PStumtrGN^R
zJcZ1>l+5JBlGI{_;?m^g)Z$`=q)M*Tip-MCymW=~%#sX+M1|s##FElth5R&ys??%<
zJua@0e1($K;u4Viia=&(TPbkmWagDt*nx{J1#lT;4Q5*_=;?#q2G&(nng_C0p*S%&
zRUxso1mua7R8aYrSfb!qT9TiWn50minUkYXTAZp-o{^ddvO}S`G$}W;#0qAIo_<PE
zW?5>HUU3E&ms@5|YOxg;B&#UEK}besu>z>><bqYAFe&H!+yamuNV%w&%mp_LssiQ?
zg!{O_KF~{sDo{{x%`GSaMFYqJ1#p0aq6+Nr)RfE;@{3xqMezI&cNw;%Lbezv|Kn-@
z8Jb|^e^Wz)(fm(#c$1=0Sy4YJGfzJ$5tOu*72pX#L02IpBUPa|Gq)fIl=Bd2Q=vRn
zAsJjmlw@S)Wh>;T=_z=YC?sd3CTACODJv*I(q}QGSy8NzoR|kMOyK1RL`^a@XXwC-
z2eAIsiqzy%PzeDk4)PR0#REp60J15xI59odN&zK*Dl0%U95~}A=clA{r6y<OD=0yP
zLB$L>(d#8EacO`WJ;<epCKsgau~o6>%1l#;R#2&i2-GU*=A|kaD8yQWjLqW$<xGgN
zZi$&WrA4V$3ZZ$4Nja&YG7su6ScX^7C`v6UEy{y=!4_evrV^4#)iFxN$@xX8#W6}&
z3e_=6&iO?}smUdIsl{LsgW6iC>Wa(~uCz=pE@g!vX!(ZZ&7jh}Jdh`$#h?-wl!3=*
zsfAd577Qt%LE#GyF@>Viykck>jS(pZP`81Dnt)FX4Zwa$%_&Yr@;*Eup$=9^OU%qE
zR>&_>fCOK$LSB9zs6s$b5AZl5!3kgofWi=zwz$YIKS{}JgHbIwV&UaKn5UOaWru>w
ze|+sf10yr^_MfSt@o4*x%F%$;IDIVzE(I+_Uk}97AGz59YZ#3Rj)uT!2#kinXb6mk
rz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinPz(V8s2Vf4

literal 0
HcmV?d00001

diff --git a/autolab/lab_template/hello/hello-handout/Makefile b/autolab/lab_template/hello/hello-handout/Makefile
new file mode 100644
index 0000000..f68bf26
--- /dev/null
+++ b/autolab/lab_template/hello/hello-handout/Makefile
@@ -0,0 +1,8 @@
+# Student makefile for the Hello Lab
+all: 
+	gcc hello.c -o hello
+
+clean:
+	rm -rf *~ hello
+
+
diff --git a/autolab/lab_template/hello/hello-handout/README b/autolab/lab_template/hello/hello-handout/README
new file mode 100644
index 0000000..c95cd25
--- /dev/null
+++ b/autolab/lab_template/hello/hello-handout/README
@@ -0,0 +1,15 @@
+For this lab, you should write a tiny C program, called "hello.c",
+that prints "hello, world" to stdout and then indicates success by
+exiting with a status of zero.
+
+To test your work: 
+linux> make clean; make; ./hello
+
+To run the same autograder that Autolab will use when you submit:
+linux> ./driver.sh
+
+Files:
+README          This file
+Makefile        Compiles hello.c
+driver.sh       Autolab autograder
+hello.c         Empty C file that you will edit
diff --git a/autolab/lab_template/hello/hello-handout/driver.sh b/autolab/lab_template/hello/hello-handout/driver.sh
new file mode 100755
index 0000000..38ec60d
--- /dev/null
+++ b/autolab/lab_template/hello/hello-handout/driver.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# driver.sh - The simplest autograder we could think of. It checks
+#   that students can write a C program that compiles, and then
+#   executes with an exit status of zero.
+#   Usage: ./driver.sh
+
+# Compile the code
+echo "Compiling hello.c"
+(make clean; make)
+status=$?
+if [ ${status} -ne 0 ]; then
+    echo "Failure: Unable to compile hello.c (return status = ${status})"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+    exit
+fi
+
+# Run the code
+echo "Running ./hello"
+./hello
+status=$?
+if [ ${status} -eq 0 ]; then
+    echo "Success: ./hello runs with an exit status of 0"
+    echo "{\"scores\": {\"Correctness\": 100}}"
+else
+    echo "Failure: ./hello fails or returns nonzero exit status of ${status}"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+fi
+
+exit
+
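
The JSON line echoed above is the whole scoring protocol: Autolab parses a {"scores": {...}} object from the autograder's stdout. The same check expressed in Python, as a sketch rather than part of the lab (it assumes the compiled ./hello binary exists):

import json
import subprocess

# Run the compiled binary and emit the score object Autolab expects on stdout.
status = subprocess.run(["./hello"]).returncode
print(json.dumps({"scores": {"Correctness": 100 if status == 0 else 0}}))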
diff --git a/autolab/lab_template/hello/hello-handout/hello.c b/autolab/lab_template/hello/hello-handout/hello.c
new file mode 100644
index 0000000..f63ff42
--- /dev/null
+++ b/autolab/lab_template/hello/hello-handout/hello.c
@@ -0,0 +1,3 @@
+/* 
+ * Hello Lab 
+ */
diff --git a/autolab/lab_template/hello/hello.rb b/autolab/lab_template/hello/hello.rb
new file mode 100644
index 0000000..02f1da0
--- /dev/null
+++ b/autolab/lab_template/hello/hello.rb
@@ -0,0 +1,11 @@
+require "AssessmentBase.rb"
+
+module Hello
+  include AssessmentBase
+
+  def assessmentInitialize(course)
+    super("hello",course)
+    @problems = []
+  end
+
+end
diff --git a/autolab/lab_template/hello/hello.yml b/autolab/lab_template/hello/hello.yml
new file mode 100644
index 0000000..6e4734e
--- /dev/null
+++ b/autolab/lab_template/hello/hello.yml
@@ -0,0 +1,24 @@
+---
+general:
+  name: hello
+  description: ''
+  display_name: Hello
+  handin_filename: hello.c
+  handin_directory: handin
+  max_grace_days: 0
+  handout: hello-handout.tar
+  writeup: writeup/hello.html
+  max_submissions: -1
+  disable_handins: false
+  max_size: 2
+  has_svn: false
+  category_name: Lab
+problems:
+- name: Correctness
+  description: ''
+  max_score: 100.0
+  optional: false
+autograder:
+  autograde_timeout: 180
+  autograde_image: autograding_image
+  release_score: true
diff --git a/autolab/lab_template/hello/src/Makefile b/autolab/lab_template/hello/src/Makefile
new file mode 100644
index 0000000..c27bc04
--- /dev/null
+++ b/autolab/lab_template/hello/src/Makefile
@@ -0,0 +1,7 @@
+# Makefile for the Hello Lab
+all: 
+	gcc hello.c -o hello
+
+clean:
+	rm -rf *~ hello
+
diff --git a/autolab/lab_template/hello/src/Makefile-handout b/autolab/lab_template/hello/src/Makefile-handout
new file mode 100644
index 0000000..f68bf26
--- /dev/null
+++ b/autolab/lab_template/hello/src/Makefile-handout
@@ -0,0 +1,8 @@
+# Student makefile for the Hello Lab
+all: 
+	gcc hello.c -o hello
+
+clean:
+	rm -rf *~ hello
+
+
diff --git a/autolab/lab_template/hello/src/README b/autolab/lab_template/hello/src/README
new file mode 100644
index 0000000..9a62e29
--- /dev/null
+++ b/autolab/lab_template/hello/src/README
@@ -0,0 +1,16 @@
+This directory contains all of the code files for the Hello Lab,
+including the files that are handed out to students.
+
+Files:
+
+# Autograder and solution files
+Makefile                Makefile and ...
+README                  ... README for this directory
+driver.sh*              Autograder
+hello.c                 Solution hello.c file
+
+# Files that are handed out to students
+Makefile-handout        Makefile and ...
+README-handout          ... README handed out to students
+hello.c-handout         Blank hello.c file handed out to students
+
diff --git a/autolab/lab_template/hello/src/README-handout b/autolab/lab_template/hello/src/README-handout
new file mode 100644
index 0000000..c95cd25
--- /dev/null
+++ b/autolab/lab_template/hello/src/README-handout
@@ -0,0 +1,15 @@
+For this lab, you should write a tiny C program, called "hello.c",
+that prints "hello, world" to stdout and then indicates success by
+exiting with a status of zero.
+
+To test your work: 
+linux> make clean; make; ./hello
+
+To run the same autograder that Autolab will use when you submit:
+linux> ./driver.sh
+
+Files:
+README          This file
+Makefile        Compiles hello.c
+driver.sh       Autolab autograder
+hello.c         Empty C file that you will edit
diff --git a/autolab/lab_template/hello/src/driver.sh b/autolab/lab_template/hello/src/driver.sh
new file mode 100755
index 0000000..38ec60d
--- /dev/null
+++ b/autolab/lab_template/hello/src/driver.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# driver.sh - The simplest autograder we could think of. It checks
+#   that students can write a C program that compiles, and then
+#   executes with an exit status of zero.
+#   Usage: ./driver.sh
+
+# Compile the code
+echo "Compiling hello.c"
+(make clean; make)
+status=$?
+if [ ${status} -ne 0 ]; then
+    echo "Failure: Unable to compile hello.c (return status = ${status})"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+    exit
+fi
+
+# Run the code
+echo "Running ./hello"
+./hello
+status=$?
+if [ ${status} -eq 0 ]; then
+    echo "Success: ./hello runs with an exit status of 0"
+    echo "{\"scores\": {\"Correctness\": 100}}"
+else
+    echo "Failure: ./hello fails or returns nonzero exit status of ${status}"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+fi
+
+exit
+
diff --git a/autolab/lab_template/hello/src/hello.c b/autolab/lab_template/hello/src/hello.c
new file mode 100644
index 0000000..8863e27
--- /dev/null
+++ b/autolab/lab_template/hello/src/hello.c
@@ -0,0 +1,9 @@
+/* Solution for the Hello Lab */
+#include <stdio.h>
+
+int main()
+{
+    printf("Hello, world\n");
+    return 0; /* important to return zero here */
+}
+
diff --git a/autolab/lab_template/hello/src/hello.c-handout b/autolab/lab_template/hello/src/hello.c-handout
new file mode 100644
index 0000000..f63ff42
--- /dev/null
+++ b/autolab/lab_template/hello/src/hello.c-handout
@@ -0,0 +1,3 @@
+/* 
+ * Hello Lab 
+ */
diff --git a/autolab/lab_template/hello/test-autograder/Makefile b/autolab/lab_template/hello/test-autograder/Makefile
new file mode 100644
index 0000000..3b7ebba
--- /dev/null
+++ b/autolab/lab_template/hello/test-autograder/Makefile
@@ -0,0 +1,7 @@
+all:
+	tar xvf autograde.tar
+	cp hello.c hello-handout
+	(cd hello-handout; ./driver.sh)
+
+clean:
+	rm -rf *~ hello-handout
diff --git a/autolab/lab_template/hello/test-autograder/autograde.tar b/autolab/lab_template/hello/test-autograder/autograde.tar
new file mode 100644
index 0000000000000000000000000000000000000000..fa96fcf301077c37769d440fd31ff170f7c8552b
GIT binary patch
literal 10240
zcmc~u&B@8v%}C5k$uBL@XP^l%FfcGMH#cQ4FfcGPGcp6w28JdkU>YQ6U|_&tXk>0`
zY-nO)Xk^Y{U}$7&Xw0BsN)y+h8CqIgl31jmz>rdupMfTY!YwUMEh<J485SJW`1eiB
zPEE_qNu{pWKyhql299s6@o#8s0*-$Jb3<bz10x0lLlZ+2BL)Qn>N*3vX(K%Tl@)?Z
zN>ftvN)&RTaj%e;U!+iyk*eSUDjF1g5|g+Rb8@T{xH!|3lNG?}MK4)FHy^^};!4g*
zP0X|6;w;Kl&@D<+(5iz;aA6PdQPIH^0^s<Bm;XVojxN5g)OH`C{BLex4$c2YrUoXa
z3<ieACI$wh`JdYHfXzHNaAwaeR>(<A(ov|)FI6bc$S=)FQ7A9UEJ;;JR4B>Jt5k4S
zC@9KLFG|eSQAkeA$w^I7Py#0$y<{aFu9A$z5`}`I%)F9fsIZPgd45q&ijqP}zCv*c
zsAZv$n3n=7RPq!u^HMUC6H8Ky6^cuflT(X}6_P5sQY$h`GV{_E$}>wc6cQDROA<>;
zixu+I6sl5-^7Xj5Lh=<#Qj1GK?kfVBoo%JSm6MrQT44t+wiLi+kTsZXt)Qn5b{kk%
zQE48?T7}}o+*F0c(h`s-Qc^+XTVjcVV`)i#PGXWmd1g+ILTPcTLU~4N9>@-b;?ktt
z%n~b@9eVmHMVV!(MS8^<TwHFMIjO}~T#&4y00$u%nZ*jAx|0i5iNd6u^K%P8dLZSZ
zUNRTlFsKTcI}q;U0{cKO8LB`*!8Ny_1QZP*3lzWs4vH$U!&6f-OUN&3!4|>uKbWVN
zj4h#1AVw(v8yJ~E^S_~yp}8R_|C^hdj^=*~f}Cir`dSKH3R;M|9>mipKRFR?@2JAT
z5dz>G56}Pbl8LI01Lc2n0|QWh4_o`+&;(xp8=9M%nSt8>#wMfrpQ>?y%P?g{{iMu1
z{iH-t)uyZfPXG$K3LzP(3dNbZ1v#La7g0AUl&2~rgPRB?8JT(63i)Yz3Z5kj$r-81
z*~MJS3JQ?=sTk6&C{{>L%!4;3;Oz*Anq+9rr~_{vfc2+Vq$ZbwS_qKlK%N4qd4SO<
z0NE5;oS2?!rGQdDD=R=NTyVvooS%})m71K9ub>1G1~oIliC!;RiAw|2=|OHiXmUZ?
z9=0m>T$yPK(F!Wn5P@0+-Mmx<1BF;?kg<7Opqd$CtXpDcPH9o9l|pD<Vp2{jsLccQ
z7p%fp&?rhRDJ{x_dBGN8s-_Z>N!2k*#mV_asl_o$RtnWIO3wL3MXAXpd8x%<5rf)V
zsOpN$60WpNE-qz-AZYsr$(uo?d3hjDLYqNKTu=rcpQRRJ^;s~afd&d+aEK`smF5*g
z+h`b(VgPj;IH(Eu#Lxijm(-l%R3z`i0}|?Bg|x)XoMMIiA_YkB6)WWB=Ybl8=;;9-
zM<h4_>;O<0g3=b(XoZKXd>9oU4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C
U(GVC7fzc2c4S~@R7`h<<05hd5=l}o!

literal 0
HcmV?d00001

diff --git a/autolab/lab_template/hello/test-autograder/hello.c b/autolab/lab_template/hello/test-autograder/hello.c
new file mode 100644
index 0000000..fa3a965
--- /dev/null
+++ b/autolab/lab_template/hello/test-autograder/hello.c
@@ -0,0 +1,9 @@
+/* Solution for the Hello Lab */
+#include <stdio.h>
+
+int main()
+{
+    printf("Hello, world\n");
+    return 0;
+}
+
diff --git a/autolab/lab_template/hello/writeup/README b/autolab/lab_template/hello/writeup/README
new file mode 100644
index 0000000..123c9e6
--- /dev/null
+++ b/autolab/lab_template/hello/writeup/README
@@ -0,0 +1,2 @@
+Contains the HTML writeup for the Hello Lab that students view using the
+"View writeup" link.
diff --git a/autolab/lab_template/hello/writeup/hello.html b/autolab/lab_template/hello/writeup/hello.html
new file mode 100644
index 0000000..0dcea4b
--- /dev/null
+++ b/autolab/lab_template/hello/writeup/hello.html
@@ -0,0 +1,13 @@
+<h2>Hello Lab</h2>
+
+In this lab, you will write a C program, called <kbd>hello.c</kbd>, that prints "Hello, world" and
+then exits with a status of zero (the conventional way to indicate a
+successful termination).
+
+<p>
+Download the lab materials from Autolab using the "Download handout" link.
+
+<p>
+Submit your hello.c file to Autolab using the "Submit file" link.
+
+
diff --git a/autolab/lab_template/src/Makefile b/autolab/lab_template/src/Makefile
new file mode 100644
index 0000000..d815a12
--- /dev/null
+++ b/autolab/lab_template/src/Makefile
@@ -0,0 +1,7 @@
+# Makefile for the Hello Lab
+all: 
+	gcc hello3.c -o hello3
+
+clean:
+	rm -rf *~ hello3
+
diff --git a/autolab/lab_template/src/Makefile-handout b/autolab/lab_template/src/Makefile-handout
new file mode 100644
index 0000000..f68bf26
--- /dev/null
+++ b/autolab/lab_template/src/Makefile-handout
@@ -0,0 +1,8 @@
+# Student makefile for the Hello Lab
+all: 
+	gcc hello.c -o hello
+
+clean:
+	rm -rf *~ hello
+
+
diff --git a/autolab/lab_template/src/README b/autolab/lab_template/src/README
new file mode 100644
index 0000000..9a62e29
--- /dev/null
+++ b/autolab/lab_template/src/README
@@ -0,0 +1,16 @@
+This directory contains all of the code files for the Hello Lab,
+including the files that are handed out to students.
+
+Files:
+
+# Autograder and solution files
+Makefile                Makefile and ...
+README                  ... README for this directory
+driver.sh*              Autograder
+hello.c                 Solution hello.c file
+
+# Files that are handed out to students
+Makefile-handout        Makefile and ...
+README-handout          ... README handed out to students
+hello.c-handout         Blank hello.c file handed out to students
+
diff --git a/autolab/lab_template/src/README-handout b/autolab/lab_template/src/README-handout
new file mode 100644
index 0000000..c95cd25
--- /dev/null
+++ b/autolab/lab_template/src/README-handout
@@ -0,0 +1,15 @@
+For this lab, you should write a tiny C program, called "hello.c",
+that prints "hello, world" to stdout and then indicates success by
+exiting with a status of zero.
+
+To test your work: 
+linux> make clean; make; ./hello
+
+To run the same autograder that Autolab will use when you submit:
+linux> ./driver.sh
+
+Files:
+README          This file
+Makefile        Compiles hello.c
+driver.sh       Autolab autograder
+hello.c         Empty C file that you will edit
diff --git a/autolab/lab_template/src/driver.sh b/autolab/lab_template/src/driver.sh
new file mode 100755
index 0000000..2155ec8
--- /dev/null
+++ b/autolab/lab_template/src/driver.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# driver.sh - The simplest autograder we could think of. It checks
+#   that students can write a C program that compiles, and then
+#   executes with an exit status of zero.
+#   Usage: ./driver.sh
+
+# Run the Python driver, which grades the student's .token file.
+python3 driver_python.py
+
+# Compile the code
+(make clean; make)
+status=$?
+if [ ${status} -ne 0 ]; then
+    echo "Failure: Unable to compile hello3.c (return status = ${status})"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+    exit
+fi
+
+# Run the code
+echo "Running ./hello3"
+./hello3
+status=$?
+if [ ${status} -eq 0 ]; then
+    echo "Success: ./hello3 runs with an exit status of 0"
+    echo "{\"scores\": {\"Correctness\": 100}}"
+else
+    echo "Failure: ./hello fails or returns nonzero exit status of ${status}"
+    echo "{\"scores\": {\"Correctness\": 0}}"
+fi
+
+exit
+
diff --git a/autolab/lab_template/src/driver_python.py b/autolab/lab_template/src/driver_python.py
new file mode 100644
index 0000000..a879813
--- /dev/null
+++ b/autolab/lab_template/src/driver_python.py
@@ -0,0 +1,84 @@
+print("="*10)
+tag = "[driver_python.py]"
+print(tag, "I am going to have a great time grading your file!")
+import os
+import glob
+import shutil
+import sys
+import pickle
+# import io
+import time
+sys.stderr = sys.stdout  # Merge stderr into stdout so errors show up in the grader output.
+wdir = os.getcwd()
+
+
+def pfiles():
+    print("> Files in dir:")
+    for f in glob.glob(wdir + "/*"):
+        print(f)
+    print("---")
+
+# shutil.unpack_archive("student_sources.zip")
+student_token_file = '{{student_token_file}}'
+instructor_grade_script = '{{instructor_grade_file}}'
+grade_file_relative_destination = "{{grade_file_relative_destination}}"
+with open(student_token_file, 'rb') as f:
+    results = pickle.load(f)
+sources = results['sources'][0]
+pfiles()
+
+host_tmp_dir = wdir + "/tmp"
+import subprocess
+import docker_helpers
+print(f"{host_tmp_dir=}")
+print(f"{student_token_file=}")
+print(f"{instructor_grade_script=}")
+command, token = docker_helpers.student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade_script, grade_file_relative_destination)
+command = f"cd tmp && {command}"
+
+
+def rcom(cm):
+    # Run a shell command and print its stdout, stderr and wall-clock time.
+    print("running...", cm)
+    start = time.time()
+    rs = subprocess.run(cm, capture_output=True, text=True, shell=True)
+    print(rs)
+    print("result of running command was", rs.stdout, "err", rs.stderr, "time", time.time() - start)
+rcom("ls")
+rcom('python3 --version')
+rcom('python --version')
+
+
+
+
+start = time.time()
+rcom(command)
+# print("Calling sub process...")
+# result = subprocess.run(command.split(), capture_output=True, text=True, shell=True).stdout
+# print("result of running command was", result, "time", time.time() - start)
+
+
+time.sleep(1)  # Brief pause before collecting the results.
+# print("> Files in dir:")
+pfiles()
+for f in glob.glob(host_tmp_dir + "/cs101/*"):
+    print("cs101/", f)
+print("---")
+
+print(f"{token=}")
+ls = glob.glob(token)
+print(ls)
+token_path = ls[0]
+with open(token_path, 'rb') as f:
+    results = pickle.load(f)
+print("results")
+print(results['total'])
+
+# if os.path.exists(host_tmp_dir):
+#     shutil.rmtree(host_tmp_dir)
+# with io.BytesIO(sources['zipfile']) as zb:
+#     with zipfile.ZipFile(zb) as zip:
+#         zip.extractall(host_tmp_dir
+print("="*10)
\ No newline at end of file
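
For reference, the .token file read above is a pickled dict: driver_python.py uses results['sources'][0] before grading and prints results['total'] afterwards. A minimal inspection sketch (the file name is illustrative; only the keys the driver actually touches are assumed to exist):

import pickle

with open("Report1_handin_10_of_10.token", "rb") as f:
    results = pickle.load(f)
print(sorted(results.keys()))  # includes at least 'sources' and 'total'
print(results["total"])        # the aggregate score printed by driver_python.py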
diff --git a/autolab/lab_template/src/hello.c b/autolab/lab_template/src/hello.c
new file mode 100644
index 0000000..8863e27
--- /dev/null
+++ b/autolab/lab_template/src/hello.c
@@ -0,0 +1,9 @@
+/* Solution for the Hello Lab */
+#include <stdio.h>
+
+int main()
+{
+    printf("Hello, world\n");
+    return 0; /* important to return zero here */
+}
+
diff --git a/autolab/lab_template/src/hello.c-handout b/autolab/lab_template/src/hello.c-handout
new file mode 100644
index 0000000..f63ff42
--- /dev/null
+++ b/autolab/lab_template/src/hello.c-handout
@@ -0,0 +1,3 @@
+/* 
+ * Hello Lab 
+ */
diff --git a/autolab/report_autolab.py b/autolab/report_autolab.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-39.pyc b/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dfacc25f48700797f5286eae416edaeaf0e2b5b
GIT binary patch
literal 833
zcmYe~<>g{vU|@)L(@eBuVqka-;vi#I1_lNP1_p*=Ck6(F6owSW7KSK>6s8pB7KSLs
zRHhV`Y^EZ+ROS@cRK^sx6t-rj*$i`;ni&}xQkhfOQyJ13QaBbdHZwLeF*2lZ1~X`K
zB{L!!4`MSjFfcfSY~W#FU?^cIVXR?jW^85*X3%7;TEwNGprGKMS(ciokf@N8SzMx^
zo?8i`)fI|TOG=CKieci(`FX{e#U+_}=?eL23MCn-3dM=JsS2q%sky0nCB+Jvc?w0T
zWvNBQsS5c;DXB$z3a)zTdSLUR;_)DJHKGl5jC71+H5H08@=J44pw=iv8|xV97{)4S
zB*JXeglN`ey2S$0c#8vSOo}FB6dS~mD;bJFzWWuUpOK%Ns$Wu?k*e>KpIi!dk$!1j
zW=VQcVoGX!K~ZK|Vo9ogYDHphK~8Ehj1ixbpPZdqq@S5rTvAk;T#{d;pImHcU<@{{
zJijR0P_Ll!77x^7u=_zFTFe9rV%8#1+$DqAAU*>F1Bea6;Na$9U|^_Ws9_Xm00(sv
zG?+`u^A+++bCXhwiWL$P6BH8jQWO%B5)?`@GK&?`O7oISGV}9bfm58BTaZ%;3!{>Z
z)XXA<;?i6zaIo4bB&MWjB<dt-D%dI{Y9}dx14EPP7GvTq#w0KYLa;C}Fx+BJOi2Mb
z3F3I>A`tr)M|^y4VrE`^e3f`)erb_HQEG8%P6;T2jTE#Mj1+7YUV{8nB^CsZT2SOC
zDir4?=H!4AizZ7ED0ROCMZ!x^XuM=+U|{gmWB~^a+={%!+|>B^TdV~|nRz7;CxLW8
dSYXF-*yQG?l;)(`frA(1G!AwSRt_c+MgUI{)#d;I

literal 0
HcmV?d00001

diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-39.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..056c94d504a238f8aba881d205b0fdfb6f498431
GIT binary patch
literal 1053
zcmYe~<>g{vU|=xK)J(KsVqka-;vi!d1_lNP1_p-WI0goW6owSW9EM!RD8^i-D5hNI
zC}u{G7*h^QE^8DkBZE6b3Udle3quM^Dq}Nq6k7^I3TqB~6uUb^3R?<$3quNfDpNCa
z6h|s&3P(0mQB*2xDn~OzGb1BI3TrTfCg)3#&6>=&I735Hi%Xmni&Jm01*I0`7nR&%
z$;eDe)nvRSo?4ceQ<_+k8easKh%YWFO-aowNd{>}#!N7WOE54nq%uS?rZ7Y?r7%V@
zr!b{3w=hJpq_6}tXtLg74NpzYHmp)m2+2rQNXslLE>S2fO)W0T%+FIu%P&$WPfg8M
zFw|2>W<)X_WH>7W1A{XtWDGzd!<fPZ3Yl6)Mur-O1xz)JDa<L%%}fgse3lee2tS1(
zm_d`xuLxwICgUyU#FUhmAOkb!{Qv*|CCFG!=3D&9#fApPdKvk-spa`a*@i{z3=9mn
zcoK_?Q;SMm3riDooHdzlu@tA~q=BVYG8FMLFfjaz(g$hLFDcDP)pyBHF3nBND=F45
z&C4uFFG@^FjV~z5EK4j&)laQR%q_@CErv1TQ}UCuQ;YO7^NLG~N|Q_Si}b-R(1!%G
zv0g#tEsm1Z;*$8ploU3Qw?PC0BM+khV-+7b4E12rewwVeIO5~;5_41I<8N`r$LHp!
zl;(igJn``$r$c4f<3YB@#~1N4FhHCS_5+0A1Brp`0HwfU5W&I7!dL_n@xvY23TTm?
z%96sA#+1$&#hS{N&KSj>!W_(?$#RPwlB$fW_??3l3=IqwAbbVmmm&-d41Ss%MFJq}
z1wn)mDBxL3GD~t&!M5Mxgr$$-Tdd$Py2VqFn4BG-nF~s(#SrI%A_Kw#djw=GNT?X(
zevn7F7>ht+pm54f%*>0A*W|h-3XdiuJp{7|ls1Z(K~4dM!Yx^Z6hc*eMruw$YEiLK
z5h(qD-B|=m_+U#Qo&i}8aS@mWa@H*l8%WgHfzo0z4+8^(2qO=Z00#%704oP00Nkwh
AzyJUM

literal 0
HcmV?d00001

diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-39.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f18f8be0976727ab41dd1114924b8483c6d3aac
GIT binary patch
literal 1246
zcmYe~<>g{vU|@)L(@YFuW?*;>;vi!d1_lNP1_p-WI0goW6owSW9EM!RD8^i-D5hNI
zC}u{G7*h^QE^8DkBZE6b3Udle3quM^Dq}Nq6k7^I3TqB~6uUb^3R?<$3quNfDpNCa
z6h|s&3P(0mQB*2xDn~OzGb1BI3TrTfCg)3#&6>=&I735Hi%Xmni&Jm01*I0`7nR&%
z$;eDe)nvRSo?4ceQ<_+k8easKh%YWFO-aowNd{>}#>_B>t1vJyq%uS?rZ7Y?r7%V@
zr!b{3w=hJpq_U>4q_DOyG&4rArLYAvXtLj84NpzYHmp)m2+2rQNXslLE>S2fO)W0T
z%+FIu%P&$WPfg8MFw|2>W<;`yiGhKEm4Si585Bwepip8=VFHCxEh8gC4Z{Ma8pagn
z6y|28g$O=N3M+)4!Vt`$$>vuCGH@m1E#}0Ol$Rg_Gw1yO|NkY(*p<vt{K>_J2F7|B
z`MIg(`9;}=MH~za3{gCZ#l@*bC9Z{~i8;<InQpNZr{<)ArB*T&@i8zk{7TjbY0)n!
z%}CXE$xklLP0cGQ)-TP=EJ-g)Oi7I|D9S8LEJ@W*tw_u*$Vn}RG2&D5le1Hc^fU8{
zONvU9OY)2K!7k8;1i5j1az03XNvd8!<t>hq)Z&u(#FP{^kpDph10xTk0Am#&I572K
z(zwG+fk2pn!-xeEMn&ul3=Bn_Ac7l2@PG)AXTd%$;%8uBxWx~%GCm_SB_%Zvq_7y2
zju}{Z7^_54ods3nr^$YcBR)PaF*h|n{uWn!d~SY9X%2|Z6CV!>VW<pyJScGD<BJ49
z7K02e5(KdjP5`q&?kEC<1xQ6PD7rWpSr|E3nHa&Mei*406zAY<1_})1)SAkY!j#68
z&KSj-%9hR;#h$_(%%I6~iye|JjH~#agB1)73>6@J1>=_@3=9l@njA$!AZH1K2$0Av
z){@MUoK&#wQJk<uU3`lb9I;V61&PVo@tL`xtX2#OO0Yk`1lTiT3=9k)(~3d90(pju
zu?Qpv3bowC%)Iz`O|B?Wc-k`3Loka#o+|?7$0AU0M9Ctg5US!cQgaGYi;9hkK#3mg
s&LT;W>p_-4JOg$Nf&e+|7KaTaqU=B!s2G$QL>PIP1o$`@1z0&40YO+9MF0Q*

literal 0
HcmV?d00001

diff --git a/examples/example_docker/instructor/cs103/deploy.py b/examples/example_docker/instructor/cs103/deploy.py
index 2429949..8bd5571 100644
--- a/examples/example_docker/instructor/cs103/deploy.py
+++ b/examples/example_docker/instructor/cs103/deploy.py
@@ -31,8 +31,8 @@ def run_student_code_on_docker(Dockerfile, student_token_file):
 
 if __name__ == "__main__":
     # Step 1: Deploy the students files and return the directory they were written to
-    student_directory = deploy_student_files()
-
+    # student_directory = deploy_student_files()
+    student_directory = "../../students/cs103"
     # Step 2: Simulate that the student run their report script and generate a .token file.
     os.system("cd ../../students && python -m cs103.report3_grade")
     student_token_file = glob.glob(student_directory + "/*.token")[0]
diff --git a/examples/example_docker/instructor/cs103/report3_complete_grade.py b/examples/example_docker/instructor/cs103/report3_complete_grade.py
index b0deb8e..9dfbd03 100644
--- a/examples/example_docker/instructor/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_complete_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd is None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is the base test class; all results are cached under (class name, method name) ids.\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache is None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 is None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        key = self.unique_cache_id()\n        if not self._cache_contains(key):\n            print("Warning, framework missing key", key)\n\n        self.assertEqual(first, self._cache_get(key, first), msg)\n        self._cache_put(key, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set the cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded; we will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except it also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like the old decorator, but the output keeps track of what decorated it.\n        R = foreignDecorator(func)  # apply foreignDecorator, like a call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of the decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries... but in this case, the only argument is func, so it\'s not a big issue.\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify issue.\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1:\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nSee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception(f"> Error: The pre-computed answer file {os.path.abspath(report.computed_answers_file)} does not exist. Check your package installation.")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
 sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("to CampusNet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())  # eval("exec") rather than a bare exec; see the minify-related comments above.\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide decorator allows unitgrade to remove the test from the student version.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
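The Week1 tests above never state their expected values: assertEqualC computes add(2,2), looks the result up under a (class, method, call-index) key in the pre-recorded cache, and records the value instead when the key is missing (hence the "framework missing key" warning path). The report1_payload constant updated below is exactly that cache, pickled and hex-encoded. A minimal sketch of the same record/replay pattern, independent of unitgrade (the Recorder class and the expected.pkl file name are illustrative, not part of the library):

import os
import pickle

class Recorder:
    """Record expected values on the first (instructor) run; verify them on later runs."""
    def __init__(self, cache_file="expected.pkl"):  # file name is illustrative
        self.cache_file = cache_file
        self.calls = 0
        self.cache = {}
        if os.path.exists(cache_file):
            with open(cache_file, "rb") as f:
                self.cache = pickle.load(f)

    def check(self, value):
        key = self.calls  # unitgrade keys on (class, method, call index); a running index suffices here
        self.calls += 1
        if key in self.cache:
            # Replay: the recorded answer doubles as the expected value.
            assert value == self.cache[key], f"expected {self.cache[key]!r}, got {value!r}"
        else:
            # Record: the first run stores the computed answer.
            self.cache[key] = value
            with open(self.cache_file, "wb") as f:
                pickle.dump(self.cache, f)

r = Recorder()
r.check(2 + 2)     # records 4 on the first run, asserts 4 on every run after that
r.check(-100 + 5)  # likewise records/checks -95

On the instructor's machine the first run fills the cache; the student's copy treats the shipped cache as read-only, matching the "_cache = None  # Read-only cache." comment in the embedded source.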
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6066800000000075732e'
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f8756a00000000075732e'
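The only change in this hunk is report1_payload: re-recording the answers produced a pickle that differs from the old one solely in the final time float; the recorded answers themselves are unchanged. Decoding uses the same two calls as source_instantiate below. The commented structure is inferred from the pickle bytes and the cache layout in the embedded source, so treat it as an illustration rather than a documented format:

import pickle

# report1_payload is the hex constant defined above; source_instantiate
# performs exactly this decoding.
expected = pickle.loads(bytes.fromhex(report1_payload))
# Appears to decode to a dict of recorded answers and timings, roughly:
# {'Week1': {('Week1', 'test_add', 0): 4,    # add(2, 2)
#            ('Week1', 'test_add', 1): -95,  # add(-100, 5)
#            (('Week1', 'test_add'), 'title'): 'test_add',
#            (('Week1', 'test_add'), 'time'): 0.0,
#            'time': ~0.011}}                # the only value this diff changes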
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
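These three statements are the whole runtime of the generated grade script: rebuild the class definitions from the embedded source string, decode the expected-answer payload, and hand back a ready Report3 instance. Stripped of the eval("exec") indirection, source_instantiate is equivalent to the sketch below. The final gather_upload_to_campusnet call is an assumption: the function is defined in the embedded source, but the __main__ block that would invoke it lies outside this hunk.

import pickle

def source_instantiate(name, report1_source, payload):
    exec(report1_source, globals())            # defines Report3, UTestCase, evaluate_report, ...
    pl = pickle.loads(bytes.fromhex(payload))  # expected answers recorded by the instructor
    return eval(name)(payload=pl, strict=True)

report = source_instantiate("Report3", report1_source, report1_payload)
gather_upload_to_campusnet(report)  # assumed entry point; writes e.g. Report3_handin_20_of_20.token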
diff --git a/examples/example_docker/instructor/cs103/report3_grade.py b/examples/example_docker/instructor/cs103/report3_grade.py
index 7a98ca2..af156c0 100644
--- a/examples/example_docker/instructor/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/cs103/report3_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# Don\'t import stuff here since the install script requires __version__\n\ndef cache_write(obj, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\') as f:\n        compress_pickle.dump(obj, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import at the top, the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it; otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # This flush method is needed for Python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper used to retrieve results later.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f7198800000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b8694473f032000000000008c0474696d6594473f4186000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
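For orientation: the `report1_payload` blob changed above is not hand-written; it is a hex-encoded pickle of the per-question answer caches, and `source_instantiate` in the embedded source decodes it with `pickle.loads(bytes.fromhex(payload))`. A minimal sketch of inspecting such a payload offline (only the hex-pickle encoding and the qualname-keyed layout from `Report.set_payload` are confirmed by the source; everything below the top level is an assumption):

import pickle

def inspect_payload(payload_hex):
    cache = pickle.loads(bytes.fromhex(payload_hex))  # {qualname: cache dict}, per Report.set_payload
    for qualname, qcache in cache.items():
        print(qualname)
        for key in qcache:                            # cache keys and their stored values vary by test
            print("   ", key)

# inspect_payload(report1_payload)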
diff --git a/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl b/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl
index 798c5ea433edaa5ae586e2d0aa2afaa78549e7cd..fc298168d395c432420ad99533ade24705a6e589 100644
GIT binary patch
delta 15
TcmYdDm=MNg&%A&E1WNS)A6^4C

delta 15
QcmYdDm=MOr00pIb02j>yQ~&?~
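The Week1.pkl delta above is the regenerated expected-answer cache: `UTestCase._save_cache` pickles the class-level `_cache2` dict to `unitgrade/<ClassName>.pkl`, and `_load_cache` reads it back. A sketch of peeking into it (path taken from the diff header; the key layout is not guaranteed):

import pickle

with open("examples/example_docker/instructor/cs103/unitgrade/Week1.pkl", "rb") as f:
    cache = pickle.load(f)
for key, value in cache.items():  # expected answers, titles and timings per test
    print(key, "->", value)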

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc
deleted file mode 100644
index de977681ea2f743536e28e736d54eac5477fa5a8..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 922
zcmWIL<>g{vU|_hIpq^;P%)sy%#6iaF3=9ko3=9m#UJMKjDGX5zDU2yhEeuhNDa<J>
zEeugismv*?*-S-lsVpgMsZ1&CDeTQmvl-?xH8V0Yq_U)Nq%x+kr*NilrEsUSG&43c
zF*2m^1T$#zR;6>fRwU*Y<fJMTmz1WY=9MTU=clCVDTHKX7AvG>=A<fQ7Aqu{mgMIq
zmSiR;=Hyf=q^IVk7A2OXrYNKp<>xAtWTb)>7As`tfjOCZ#U(|h$tC$kx+$4OsUSur
zS281#e?V*|1_lOake7HE7#K<zQW%>Vn;27=f*CZKs}^x7C@3hnXO^YrDI_Z7WEPhw
zsOMIKXmy36)RNMoykeMma(-TMW^qYoUb;el8rb^c#N1Sc)ST4Z)Vva~n~GA)Qj3aH
z74nNxQj7EyT=mlRz~)26<3Z+XL>uZD=@`dqDimkrm*%8Etx<?J)-lpCj8)J`gxRPG
z(fmu?6BKG7OBB*d^O8$4^Yavv@>44PRx;ia_RBAUOQ)u|78T_eX);E!LHw|ip@@Zn
zf#H|9equp^esZy)fw6u@er{@ceo?leUP0w89%Pq+loo>mi-E0(je&t78O(+TCWsBf
zxB@Z>8i*z3`3iZZxk;%-#R>_D2?~jMDGCWm2?{0PScW+t>h<Ex+=84+SO}D4q-GW=
z6qn{&fdkD>Au%OIBT*+wQ^8gtQ9DVY2o#xqMIcvbGTmZKyv3LV_8x>_Wnf^q#hjRu
z0*YmbH&}`|L4q9d@wthadGYa8;*t5KMG8f!#icnVphz@Q&{i-~uvK^oGP6o72poN&
z2u@Te&P~k80Vjx;%nS?+FG0q?1Z56CP1alN@$o77$?@^vV1iqcmzbLxAAgIrpeQr1
n1mY-AkV04>{kJ%5a`RJ4b5iX<J}CxeL>6Wa4h}XBCJ{yeCW7>k

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/deploy.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/deploy.py
new file mode 100644
index 0000000..2429949
--- /dev/null
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/deploy.py
@@ -0,0 +1,52 @@
+import inspect
+from cs103.report3_complete import Report3
+from unitgrade_private2.hidden_create_files import setup_grade_file_report
+from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet
+from unitgrade_private2.deployment import remove_hidden_methods
+from unitgrade_private2.docker_helpers import docker_run_token_file
+import shutil
+import os
+import glob
+import pickle
+from snipper.snip_dir import snip_dir
+
+def deploy_student_files():
+    setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False)
+    Report3.reset()
+
+    fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py")
+    setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False)
+    sdir = "../../students/cs103"
+    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py'])
+    return sdir
+
+def run_student_code_on_docker(Dockerfile, student_token_file):
+    token = docker_run_token_file(Dockerfile_location=Dockerfile,
+                          host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp",
+                          student_token_file=student_token_file,
+                          instructor_grade_script="report3_complete_grade.py")
+    with open(token, 'rb') as f:
+        results = pickle.load(f)
+    return results
+
+if __name__ == "__main__":
+    # Step 1: Deploy the student files and return the directory they were written to.
+    student_directory = deploy_student_files()
+
+    # Step 2: Simulate that the student runs their report script and generates a .token file.
+    os.system("cd ../../students && python -m cs103.report3_grade")
+    student_token_file = glob.glob(student_directory + "/*.token")[0]
+
+
+    # Step 3: Build the Docker image (you only need to do this once; add your packages to requirements.txt).
+    Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile"
+    os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .")
+
+    # Step 4: Test the student's .token file and obtain the checked token file, then compare its contents with student_token_file:
+    checked_token = run_student_code_on_docker(Dockerfile, student_token_file)
+
+    # Let's quickly compare the student's score to what we got (the dictionary contains all relevant information, including the code).
+    with open(student_token_file, 'rb') as f:
+        results = pickle.load(f)
+    print("Student's score was:", results['total'])
+    print("My independent evaluation of the students score was", checked_token['total'])
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
index 3543f1b..286b79f 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py
@@ -1,19 +1,14 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
-def reverse_list(mylist): 
+def reverse_list(mylist): #!f
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
     reverse_list([1,2,3]) should return [3,2,1] (as a list).
     """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return list(reversed(mylist))
 
-def add(a,b): 
+def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return a+b
 
 if __name__ == "__main__":
     # Problem 1: Write a function which adds two numbers
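The `#!f` marker added above is what drives the handout generation shown earlier in this hunk: snipper keeps the signature but replaces the instructor's body with a TODO stub. A simplified sketch of that transformation, assuming the marker sits at the end of the def line (the real tool also preserves docstrings, which this sketch drops for brevity; the stub text mirrors the generated handout above):

def strip_marked_functions(src):
    out, skipping, indent = [], False, 0
    for line in src.splitlines():
        if line.rstrip().endswith("#!f"):
            indent = len(line) - len(line.lstrip())
            out.append(line.rstrip()[:-3].rstrip())  # keep the def line, drop the marker
            out.append(" " * (indent + 4) + "# TODO: 1 lines missing.")
            out.append(" " * (indent + 4) + 'raise NotImplementedError("Implement function body")')
            skipping = True
            continue
        if skipping and (not line.strip() or len(line) - len(line.lstrip()) > indent):
            continue  # still inside the instructor's body; drop it
        skipping = False
        out.append(line)
    return "\n".join(out)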
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
index 7d4b431..c9a23ec 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py
@@ -1,6 +1,3 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
 from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
 
@@ -19,4 +16,4 @@ class Report3(Report):
     pack_imports = [cs103]
 
 if __name__ == "__main__":
-    evaluate_report_student(Report3())
+    evaluate_report_student(Report3())
\ No newline at end of file
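As the epilog of the embedded argparse parser spells out, the student runs this file directly (or as a module, since cs103 is a package) and can restrict or unmute the run; grading itself happens through the separate report3_grade.py. Typical invocations, shown as comments because the flags come from that parser:

# python -m cs103.report3               # evaluate all questions
# python -m cs103.report3 -q 1          # only question 1
# python -m cs103.report3 --unmute      # show print(...) output from the tested code
# python -m cs103.report3_grade         # produce the .token file for upload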
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete.py
new file mode 100644
index 0000000..37c50b9
--- /dev/null
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete.py
@@ -0,0 +1,25 @@
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
+from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+
+class Week1(UTestCase):
+    """ The first question for week 1. """
+    def test_add(self):
+        from cs103.homework1 import add
+        self.assertEqualC(add(2,2))
+        self.assertEqualC(add(-100, 5))
+
+    @hide
+    def test_add_hidden(self):
+        # This is a hidden test. The @hide decorator allows unitgrade to remove the test
+        # from the student version; see the output in the student directory for more information.
+        from cs103.homework1 import add
+        self.assertEqualC(add(2,2))
+
+import cs103
+class Report3(Report):
+    title = "CS 101 Report 3"
+    questions = [(Week1, 20)]  # Include a single question worth 20 credits.
+    pack_imports = [cs103]
+
+if __name__ == "__main__":
+    evaluate_report_student(Report3())
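The @hide mechanism above is detectable at runtime: in the embedded unitgrade source, hide is wrapped by makeRegisteringDecorator, which stamps a .decorator attribute on the functions it returns, and methodsWithDecorator scans a class dict for that stamp. remove_hidden_methods can therefore locate test_add_hidden and strip it from the student copy. A minimal sketch of the lookup:

def hidden_methods(cls, decorator):
    # Yield the methods of cls whose outermost decorator was `decorator`
    # (mirrors methodsWithDecorator in the embedded source).
    for member in cls.__dict__.values():
        if getattr(member, 'decorator', None) == decorator:
            yield member

# [m.__name__ for m in hidden_methods(Week1, hide)]  ->  ['test_add_hidden']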
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
index b0deb8e..9dfbd03 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py
@@ -429,7 +429,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n    @hide\n    def test_add_hidden(self):\n        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.\n        # See the output in the student directory for more information.\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
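For readers of this generated file: the Week1 class at the end of the embedded source above shows the intended authoring pattern. A minimal sketch of one more question in the same style, assuming a hypothetical helper cs103.homework1.reverse_list that is not part of this patch; assertEqualC records the computed value in the class cache on the instructor's run, and later student runs are compared against that recording, so the expected answer never appears in the student-facing source:

class Week2(UTestCase):
    """ Hypothetical extra question; illustrates assertEqualC only. """
    def test_reverse(self):
        from cs103.homework1 import reverse_list  # assumed helper, not in this patch
        # On the first (instructor) run the computed value is cached; on
        # subsequent runs assertEqualC compares against the cached value.
        self.assertEqualC(reverse_list([1, 2, 3]))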
-report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f6066800000000075732e'
+report1_payload = '80049570000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d659486944700000000000000008c0474696d6594473f8756a00000000075732e'
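The report1_payload string above is a hex-encoded pickle of the per-question answer cache; the old and new payloads appear to differ only in the recorded 'time' float. A sketch of how to inspect it, mirroring the bytes.fromhex/pickle.loads step in source_instantiate:

import pickle

# Decode the payload the same way source_instantiate does. The result is a
# dict keyed by question qualname, e.g. payload['Week1'] holds the cached
# test answers and timing for the Week1 question class.
payload = pickle.loads(bytes.fromhex(report1_payload))
print(sorted(payload.keys()))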
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
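For context, the only step the generated grade script adds after the reconstruction above is evaluation and token packing; a sketch using only names defined in the embedded source:

# `report` was rebuilt above via source_instantiate; evaluating it and
# packing the results produces the .token file that students upload.
gather_upload_to_campusnet(report)  # token is written to os.getcwd() by default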
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
index 03baa4e..af156c0 100644
--- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
+++ b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py
@@ -1,6 +1,4 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
+
 import numpy as np
 from tabulate import tabulate
 from datetime import datetime
@@ -431,9 +429,9 @@ def source_instantiate(name, report1_source, payload):
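The embedded source in this hunk opens with the cache_write/cache_read helpers, which store lzma-compressed pickles via compress_pickle. A minimal round-trip sketch, assuming compress_pickle is installed and 'unitgrade/demo.pkl' is a throwaway path:

obj = {'answer': 42}
cache_write(obj, 'unitgrade/demo.pkl', verbose=False)  # creates the directory if missing
assert cache_exists('unitgrade/demo.pkl')
assert cache_read('unitgrade/demo.pkl') == obj         # loads and decompresses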
 
 
 report1_source = 'import os\n\n# DON\'T import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 20 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f7198800000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b8694473f032000000000008c0474696d6594473f4186000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
 output_dir = os.path.dirname(__file__)
-gather_upload_to_campusnet(report, output_dir)
+gather_upload_to_campusnet(report, output_dir)
\ No newline at end of file
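Among the helpers minified into the report1_source string above is a caller-introspection routine that rebuilds a qualified "module.Class.method" name for a frame further up the call stack. A readable sketch of the same technique, with illustrative names rather than the embedded API:

    import inspect

    def caller_name(skip=2):
        """Return 'module.Class.method' of the frame `skip` levels up,
        or '' when the stack is shallower than that."""
        stack = inspect.stack()
        if len(stack) < skip + 1:
            return ''
        frame = stack[skip][0]
        parts = []
        module = inspect.getmodule(frame)
        if module is not None:          # getmodule() is None inside a bare console
            parts.append(module.__name__)
        if 'self' in frame.f_locals:    # bound method: include the class name
            parts.append(frame.f_locals['self'].__class__.__name__)
        if frame.f_code.co_name != '<module>':
            parts.append(frame.f_code.co_name)
        del frame, stack                # break frame reference cycles (see inspect docs)
        return ".".join(parts)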
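The UTestCase class embedded in the same string is the core of the grading scheme: a read-only cache of reference values ships with the assignment (the unitgrade/*.pkl files elsewhere in this patch), a second cache records what the student's code computed, and assertEqualC compares a value against the cached reference, falling back to the value itself when no reference exists yet. A reduced sketch of that pattern, assuming one plain dict in place of the two pickle-backed caches:

    import os
    import pickle
    import unittest

    class CachedCase(unittest.TestCase):
        _cache = {}   # reference values; the embedded class loads these from a .pkl file

        @classmethod
        def load_cache(cls, path):
            if os.path.exists(path):
                with open(path, 'rb') as f:
                    cls._cache = pickle.load(f)

        def assertEqualC(self, first):
            """Compare `first` against the cached reference for this test, then record it."""
            key = (self.__class__.__qualname__, self._testMethodName)
            # Missing key: compare `first` to itself, so a fresh cache never fails.
            self.assertEqual(first, self._cache.get(key, first))
            self._cache[key] = first

The embedded version additionally appends a running counter to the key (unique_cache_id), so several assertEqualC calls inside one test method occupy distinct slots.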
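The hide/makeRegisteringDecorator pair in the minified source is a general trick for locating decorated methods after the fact: the wrapping decorator stamps a .decorator attribute on whatever it returns, and methodsWithDecorator then scans cls.__dict__ for that stamp. The same idea in isolation:

    def make_registering_decorator(deco):
        """Wrap `deco` so its output remembers which decorator produced it."""
        def new_deco(func):
            out = deco(func)
            out.decorator = new_deco   # the stamp the scan below looks for
            return out
        new_deco.__name__ = deco.__name__
        new_deco.__doc__ = deco.__doc__
        return new_deco

    def hide(func):                     # no-op marker, as in the embedded source
        return func
    hide = make_registering_decorator(hide)

    def methods_with_decorator(cls, decorator):
        """Yield the methods of `cls` whose outermost decorator is `decorator`."""
        for obj in cls.__dict__.values():
            if getattr(obj, 'decorator', None) is decorator:
                yield obj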
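evaluate_report in the minified source swaps unittest's default loader, which returns test names alphabetically, for a SequentialTestLoader that keeps definition order, so sub-questions run in the order they appear in the class body (a class __dict__ preserves that order on modern Python). Stated on its own:

    import unittest

    class SequentialTestLoader(unittest.TestLoader):
        def getTestCaseNames(self, testCaseClass):
            names = super().getTestCaseNames(testCaseClass)
            order = list(testCaseClass.__dict__.keys())  # class-body definition order
            names.sort(key=order.index)   # assumes the tests are defined on the class itself
            return names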
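gather_imports, also embedded above, snapshots every .py file under each package listed in pack_imports into an in-memory zip whose bytes travel inside the .token pickle, making the handin self-contained. The zipping step reduced to its essentials, assuming a regular (non-namespace) package directory:

    import io
    import os
    import zipfile

    def zip_package_sources(top_package):
        """Return the bytes of a zip archive of all .py files under `top_package`."""
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, 'w') as zf:
            for root, _dirs, files in os.walk(top_package):
                for name in files:
                    if name.endswith(".py"):
                        path = os.path.join(root, name)
                        arcname = os.path.relpath(path, os.path.dirname(top_package))
                        zf.write(path, arcname)
        return buf.getvalue()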
diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token b/examples/example_docker/students/cs103/Report3_handin_20_of_20.token
similarity index 79%
rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_0_of_20.token
rename to examples/example_docker/students/cs103/Report3_handin_20_of_20.token
index f341fcbe61424bb2cfb1ff275aada4c9ac0b1d87..e6e91d99f0946a63ec47434ea1be99bbe2d634b3 100644
GIT binary patch
delta 9876
zcmeydo#X5&jtSz7A`>M{7)2)fS{QMuFfjypGqZ>=fWQ_Onc(>QzRxU}7#KiUfPsM_
zx!BOaSidN>Ait=@Sg)XRavzK3<X#r3$&Xkxn7Fv6FA!rAXXKi`Q;bQAk!$*V9!3!F
zpBR%mBiHnYJdDZ^O3pyn(7-@P!BkU|i;F8W7i5|O*l4cgoW$Z{g&>Fp8W393ifj6O
zaVA+tuIc;4nY<XergKR!NilLw)?}8Rypc^^2H|GejnctewdPF?VP;?eVM&JRf2Eit
zn8|mlD3VhZ6ciL3GBQ(Axu)CjGRiS>O^@MajAi7SzMGfPosnxgl1J63;4euoRF5s4
zurbb#iGjh6Y2reO$y$t3+{%h+Tp&j&DJf0%V@#O5p3&A8CIF_3QcFsU@)UA1i%T?$
zQp-|{ic?cGaw|c6P}rxWrYR(*q-Z4SBxzbHC@ZE-PGAbr1nabeODNbXBx)xqC@Cp{
zC7?zoY9~P=z?Iozavih5<n_#UvPcdFrJk3sp4Gi)XJi0jd2mc93d5p1uQa!yQX#Qe
zA+KPv4!hj+iGqxBi7?56%CyY%oYWE+zceqiq$IVt1RkiFdBp{($#D7nVwlRrqV$5q
zqT*B-zqk@ERg#&TI$ctT(Ppv$hbt(t3SMH=oqk4$QHPOhdfp{QIgr?dON>$=dfg>P
zH4qKrPoFQ$$jZny9mZw{NyuGh^kC$gyq*)Je6r)oxzjg_Fs@_dnk>Vu43htMh0$pG
z9#KY<=^sTI^Cma)$V{&hWAvKb$dfU-kw;>>hd5&ph%q^rH<p=;i)->l-Y!P2>3TOn
z>1y)A8{(7Y&q*+HO|QSfC^?-^l2L|{YjWW^XMS)TfZ{wgr#RILBqlOl{wAXUBiH0a
z{zgWw>5Ns3lGD%KWb9<*ntoo2QGD`30Z`7Fc#Bbdx|}qlJR{e12WiF!(-mbHUBMP_
zyr?pLq6|21I^F@<K0Wacqr~+0GK?CGT$AH2$xi+#q&HbXSbch*ETi1?wX%$slNSmb
zO*d3z6q{}!$0!44O_mjr05d>BoYQB^F$#jYlWRpJA&j*mRv?D(<liC@Y+!FFPhTj{
zXbWXeULdN<$Td0Ps+j;p668q*urDX`i^;J=1i2=kzbenjHTlC;k;&O&ywmq9Fe-pd
zm7V@qfl*Ebq6n-LM1XaJq_shW@bm&jMpmeG(`ywO6{gQqWK^46FK#e-gSg$~g~Gy<
z=U)dKCp^7IiO~dZr{N9Z>1UJ}c^J7S8{UwDnea!6QC%6y#$p9q1tVh<B$0x|;$kkY
z$qR3QVv_MOqx$q7Wk%WQtCSg~CpSpSL!1k8XhBhCUWta1f|4fJ^gqgsF4G-V7)>S{
zNXbq1mns59?Jeoa_ob{<xwyclL-Sx!YH?{!Nii1}7bqi_79=KTYZPi)P48A^^qGD`
zmC;2EtU5!%Rw1uIFR?hWs3@^gBSUlYJ84iFoxK0HF(cRHg`slO*QqgbF>+0=kJe-Y
zCq<FT33ujzl8ET!{0!E~xw1kKj{RMc=^xY?B^kLU=ihan?ytcZ#K<+>?**eGBiHo8
z7mS9W6mw5W6l^ajJRnttUP)$2PU_@De#yxj?rAb|O;>!$s5yOsCSx!o*YpK18Qqlg
zlpw`UNn%n?YJ5s!Nuq+SLUb(HoID=|TLlY)>4{p5QjoM!oSa{js$i>7UCTASPm9rF
za-)Lj^uJn+#?uY8866q9rYpQ=bedeKXfgeRHly5hejP>?P|foiL`^>U5aif;9Y!M|
zu<@YqS5_!guvIA3(41VaBsu-Q4x{Ph^Gc==Z-C@MI5|JBBr~rxb$X&MqX8q=bo;lA
zVvJmq1s_XKZdA6Ld_Y+mlzf*<PPfqm<)X<GbycP}>Vay&Nl(P4Z`5PdoBlwLQEc)?
zmAuIcs#en%>NEOJ|ESLx3-J@kiYiD7nx0_5s5`ySfYERILqkT9>2p6YvU4Nc$~ArC
z2gcmV57mVyJE&`b=;?VM8HFK917wUcNSh2;VnI72%VY&z!O0KRH%)%1E<SzkCq@U5
z%;f(X79e`M-DgIw>9dU)L#KZ<W|Ri0nOxAuGF`)jQ3=d3c*DV2Xrz~!msz5r2@{#@
zt0jeGA4qum1rx>)FvG@_Q4GYGT+qfkJ>Qg(56m%m!!bE89mW=BR!{<)HhsZYMoFjw
zp~>&G{b6jTRIvPHgE!tFA+S1BD}|>c`A&R#zd6*Vd|e^1j{D|}GRQg(>RKanAF2ya
z7x=+A2^0#G3*PZhzNlx2EYGJOHGRD$qaU)^2O||w*h)+?um$DH8excLW2wmv1}cnP
zlRwV0oczE*ev+XYBiD5K-;7GrCt5Qqp~PL`Z^qe-T+{jfFp5vsGwPmZ%V;_oYOLt=
z0$WBNkdf0T*fN4VAw0Rj*a_JI`;28l@{`{in}F!)wRVgGAkR*YH&FozOfE2HV<|4y
z<eL7{j!~D9Yr3R8qr!A6dqzD_rIc&Ws4+Rf6ojRw-?V3RVC0%E<G^S>na?bqk!#Wu
z(aF9R9H2xiHd)t#gOO|UIx~>3=6+LT<eGfon;F&uYckJw7e=n>7aSS2Kt;%RmFbF3
zj8=?XlOKGSW8|9jLk?VGOfIm{WaOGW$3kiHehX1XuF3O%$TD(G-uJ_ak!yN`Gh;TW
z;1T#PKKX#9&E&t94wLPy<XN~t6(g*5;0deK(JKF};`}_A6rv%JRAmGcEXYjG&VjcG
z@{75qTe&hiPOougG@QQ9l~Ib3Yx-$dMoD>a4l7D6&Mz%WP6ZWS)wN)uTm@T&%-n*>
z2drf$-?LT}1`DPs*ec}e#mA>*=A_2QPmgkAl$(6c7F3?iXJay%%x}j%o!6a_ee!u5
z<H_;1;?on{8Koz$wUy)xDJo3`n*gqyCdb?QPF`y($H)cJJ^7ri>STp~Jl<dh(1vkU
zW&y|&u#hsyA^Alm3dJQwsfkb_r~<vH%mTN}9Ejmiy_xx3lWYDNfMW6=D7er5Q=J~~
z$;cxP)(S2w6sj@{^va7eOHws7xu(Z+GMO=QO%D7Ia#7)b4M?%S{=Wj4F+ISG(Hp|*
z;bIbI<eJ>*U^xA!7o+y%`wseyTvI<XDuU8k3^x<FjOAtmWfN4xrrYx{aj=4XqzN&}
zo`;DO!V2(W)SKSJ!^8vOuH#`+nY`MG8^n{Ge8EWql8?aQ#x-4liBV>{6)zM2WJPC?
zW%;~JDqt6gfLb-v*Yh%2K-A|!Y7Yl#M&rqUoP89)UPr2ML7@QU+A3+~X-?nh$0#@b
zf*+&8^pAdwCLsUt@iS>n=5uugB~^YV?a7U<pq|V{ex_(plTCohLI{#ak(18k05{d?
zdjc5s7`dij6ksx*t`Nv*G?~{uefsV|Mvuw++(AXed3Hun<Ln(fqr>DzSK;aNgBV37
z=XvN%e;5P_$&W%zdZ2Q}LmE`$2s0^wnjoH_P%ZS-nEoIb)Cirf9m428y*dQkkPYBs
z6rH{=gpqstsSrj50f@Vx-cwN0oc=e2(HGJzfC}0v=_r)sXQ$?APL}aDWd*k-rtg<z
z(&8*gtjx(zOwqKO+~ch?{eLK<-gLb%M#Jd|VT`hns<8;ty?|8mTwGk61AW#Xn$F12
z_)XQq+|(@500zuW&5eyx)w%K#b5m`VAl*7+C9di60*ubne@TM-hv$45b*JwXVC1d`
z_2QfgHr1GNF))BIKfLFel3I|HU#VA62}|G5en47LelDa3sRwb4aeQ)qZb43JNh&PI
zKrQ70D=p2-EJ-g)Oi7I|D9S8LEJ-!e%g9VgNzIE-E=r9_EJ+3D>tdMR;?$DTf_R7q
zP|A&mSVG9G^u&^k)S~#(0#GnQ&6<AJmr=SNr%s3mb5rw5pz0NhQgidmQsbe{%S|oG
z$WJN8tv^3GJGCf2BQ>WWwFvJ16o_b1X<j@ys_VhtgO!iP8KoteIWQig2u;t)PlAa;
z%TRFO73XCZ6r>jEf#~>@%p#aeVSI2BQh@j*zPO|`B{i=EY+<p6rWH8%)#C_IXoufe
zM<F*eFEg#u)-5roI8{d>KPj!WI61K-6(*2ck(yiz<3d`F(D>CWN-a(;(a?k>{IvYi
z5*-DIRCs1dMt*6DM`lV&eJZF>z#6g$C*+rwfa2O#2@=_$;SOjc6~F6|>`BeT?vr}_
zUMU6zs;z>Oo}Rv*o<1}Li}k@HB#=%xET}agB@QSSY(dH)N^}%bQj1H#f*_-G6q0jN
z6Z7Ihf|+@VC7Jp4d0<IURivYkT9KSnnv!Z8tsWm=P??;VoRJzIudbt@uB8V~EFdN%
zGW7~7K^$m!z;l=uNIVu23(yh)WGy5afYKb)2l2`IDXH=KdGRS3;Pj~B0;WKLtfK%^
z1GW;JTA*nKETCYkfS!iIYLJYG&&f{)`7O^D$yiX;A#IeAUtAJjl3M`wbbhg3L1IaU
zUP@*Ws5C+{K~q6nK}o+Pw?Ih;tWOQoO}3aq5Vew-dBr6~rO74vMbL7hIJqdZpu|>b
z@;^37W>CKlR5ClL7bU4{f`&cPAUOe4`9QmI3bqQ6T&4#qE;Q0m0u-tSJmgXj&O^!y
z!6m5$3Win+F5p5@p(G;}7K+6Rpg=8FNX$!7fW`<&HYKwt739K7klIRx^3<YK1yIdj
zlA5Pbk`Fc&=0Ai|Ta27spP7?d49j6qXBb&21ZU=!<|LM+DwJd-mVj)4xkjO=G!Mkj
zEK-0Z9R*0(fSs3~nwMGx(v_$HNp4`L>wz7UU#wSLSzMButD%&fq5#hg^`HW!SV2uq
zp`fxPBR@|;7rpEUm(;M#3N3)Z=?I)?6l@hhMVlUo(7^OCC=o!DzLI9NK`gY-4fUw8
zl|sF9er`c#4#=aa3XsI6keQp9o~odcpH!BaUs{|~sZg0;s!*PplcSKIms6>blCMyb
zky)&epO>6!4H{Vk$rmYrdhChmsl^H<`3gm;g{7HAsi302Sg)j_L=zGQ5c5INsbH%B
zNxRdn*_pWNVNFj+s@2oehu3MkkgTGQ&<ZIuFoF|Z2PjA(dKHpNGjmcDbahJ-(-kn3
z=_zS)p(H{RD}|8MVw6ZPMvhKsOhXhvld>*2LUchs(^CM2Xkrm0L_z)R)Vz{nh4Rdj
z3{bj(rK4g<<pqvID~SEc8L7$HsVNW%1zS)`gV#9l(gs}0Amwwcg$<;>P*(6sEm1F4
zC@cl_87dV(jR=^#pedzT0X+Ip4~nyLP)aXPRY=b-QP6<ou;h}={Jg{>kk-7C#LT>6
zg~Xg3g`(7))UrfSrJk3TUz7_jrxY^tKm~7RUb+IvsnGZadpf@$H4mOL!Qlk<w2lIz
z_^2nc{D8Ldf+4v;9qM|8^2A~*B^?E5Ttusv<d-Dos6z{Js2<-+h0MH^)PmGJP=-@T
zElbQPg}6LFO`#s-UDO}}8LFhCfD$K2CL;#3L1V+^5*%I*?2HT`ECz4iLCObXND-=6
zPzh^(AdMD7nm#3yjoHOV2aQJujYkKKM+c2Z2aS!4O-2We4c-Wi4jMxTfky|8Aydku
zgT@L9ql3l@ka^_MLE}L>X#C*7^m1{gb)b=Bsp$?9OnX?(4J|D!rZY-1WwRQm85kQ)
zFO+1GRW&iRFoO*#8yFBfsI03Q;LXS+!i;-a6KJ^;2yba*V3@vNib+lZGz5*hkO(9X
z!CM;Bn83@5^z{lVk&OmvLM+l@U=V_+2a{VGKeA4*mts;-0QG)QSH^%9fQc=Q@f;wd
zbz?zhp^t%sl`t?cY-#KkU}OM~_36V$=8zo%vI4z>h0y#~YPy#+lR`Zk0~^C+Mh1pN
zB_;+2hN&B&DyH;sm*f}3L&qkj^!Vy$<maa9my~9tf`$}I!6Oui(FWKEx_)X!B51_D
n7{-W)3?-nAUry=a&CO3K%}I@)zD$})g+l<^<(qz6nn@M_25F^p

delta 8393
zcmX?misQ$2jtSz73=<_x7#Sw|TGYRnVqysJW@Zs#0D)CLQo&qD*aOU%85lrVh=GA2
zx!BOaSU)2_H?=&!DBDo4pi)UmiI>Z@A~ClhCsm=iq%<WpuS6j^KP6R9AtWQSSRpMl
zCsiS{SRt{rBtJK?Br`cNC#O;&JvA@2D6u3pMIo&yKUbk7BUK?SQ!OX8SRpeH%*)Ix
zE-5NaF3B&_P01`u1u-glxxki9Ud1NG#ml9jpr8QaPd?9<z{opUpWRjyAqbXLRtWKT
z@wZYiRLIH9OD$H&%`7g?%uCnf<(huKj8R_46XfPxkbBcg^O8$4^Yavv@>436G<msr
zxl&To6cSTXG!k`^G_4ePC$D4=VdS0smt9f;VhaJoW$+n3c`cJPBkyEK4hu%!$(0-i
zjJ%Uqa@a}oa-jx(nfteYdzly*_D)<NUJvprmBN|0U<OA_X<lYYdQoCZ>cs!t6E7$*
z@=pBV!~zPQ>GtwW;*7kLy&1Kcc)6zk=V1i#99cC$JV#zeWhgBtqibkjprc@_2@X|W
zuFTwm{Gt*Ca6;mpEXX7dQfQ~ZB+JM<`2wpqBk$yiOw!D}T)dz_Vg-lw<O63VC+D+q
zNudYO(KE|E7IQK(EasfVsVWLLJFhgipi&{RSRt=~cluswMmdlb3vV*Y*29$)RHkL7
z=cJav1wbAxNi8k``3>sC%)H`))MSK`{9?H3#G>?q#G>L<xIl3wLb4<?7a>uSQIwjP
zl9`vTpsbKGJzs`VosoC4AeSp75I5Xn)Sb>H%c#T1J9#6QJlNY8xgg$u%cTzEPd4CY
zW#pZFkxP1V05>~GboyOcMo&eMW9<|QDoZl*^Aw6wK>=$R4~{Oqf=ZC&<b@~aGV)G;
zAjh~4tgBz1Q5nRTtjBB2$U8Za*92rNpZMf8yy0M$#PkIUjD83v%XBV9MjtRg3rtE(
zzoEz&%K<VRgeUJk+sVi~*;YWFk$3t-B}M~A-sytMj8cre)3ucuWte!mCSN?~EC`N9
zP=-}V%_&Z`0*i`FU#H9{0CLUr{mP7ujJ(qwR2Ub7_><#>K&fJK?*)(n(+wVj($;hX
zRYq_CO%G6Ie89*%-9U}el?~(uUarZ4msA*er_WJiRA%IzzF&<|o{5)h+EYdeP{d0;
z1%>%^d38oPM&9YR>Wq3I(TPtP)fjmv^NNB5H>fjOf}GlW*^rTU`UNdUvB_&hWnj#E
zq7pDBj~K{jU<Ec}l2Fm<A2b-PK-wQ%v7er$$*9c%@vbuO^x2w>wwy2?*Yu5jj51t>
z#U+U)#hRc%IB?ZW7%GbrI0vqZPgc0bhR``#{~E|klLM}aOqP@2W#pZ{L5ooVV!yQ*
zvJoJ2AYlyhhwOA4ZALyNbOj(qNkyrN*-!~yuIam8f&*>(?U#(IjJ(s=X@jHko;IT$
z$Y*b_8-Xar8*WH$ns`Ha`UV|F9+1q$8&U{g-_>DMSA!Z2vPz*?!B)Y@*aTUqAhEa@
zl*t!Lfr8Oqml0fWoP5nF!^k`Nzmzm1@AUb)jPl|TJs?vGiZb&`G?Wy;#q(reX%|pY
zA#DP({i44ZXF8~&0f(yZEr;nV^%xC7G6!#g3fe*$D^_qynXG6nECObM%lFcP#N=#^
zLQN~)$rogN7<s2J)Mo@4n;<I&N+Wge7?nWjak{(#BiJ6sFuBQZWw}5G-;dVhgc~%S
zF`Q9yvV+`YkjzG5k;&&XSSP=g6M}K)-xUEd8-7TDN_}}3M&9XMMvOs>ywhWh7?nU)
z34CM(CA|$sjLJ|igTf4wrS(cOOL9_qLAgY7vcP>!kop5388sPsC!bdc2J!4aF}ktj
zDQWUf|7gr8!UqYnJRb#H1q%b->4GMVQqW{poSa{js$i>7UCYbGJKfrZ5mW)aQ#57d
zoxZ|^(U_5Udb}y4_+-9^S|DThe`a)o2w8xfzc5C4y8jnOE=JzT2FfZR1r1*qLG)zB
zM~dKZQZ!>U5`owbiZNw{LIqoeLJdvc$q$r4(OhT7XbRGo_?6KF>P?V35Khj|E6L0&
zP34{b*9@HWbj=yTg+SqBP+CYZXS8GFoj%W;Q5xiw1Ir~R`>BGm?niS_dYwE`Rh*G`
z`Ug)&F(w1v$@^9HB$1r~E8O)$QhB*1?|YKV$UC`E%?ebASTg!E@=mX}WQ-L;=%|9G
z*XeI88Fd+Xr%PIa%fv!mVNtj{!7U7s%awV#rU(3F1e>WVJe}8?Q3GT=i1~|Am<M61
zGVkO<UE%2qoEceJ{qpltd8aqR)QV3(V9huitWA9KTTKTrOL($_7ASr|ngnba<H3UB
ztR+R2Rv?q4Co5<xfyLyx3XSwK^D;|7Azi2|Kb_BxQ3~B+>B)0n1%Q={PuABF1GA(j
zC+P5lS@M$;bl9iM)iJU&@=jN@2m4ylo>7b4R!Pg)Koex2=JXF$jO^3b*)w{8wP=G>
zIxr@ID0@aNA*f2Ea6$Ip-T#byU~S@)>)r}MS@Lyk(*qnC#n9a<Jo%rV2AZIu<Ya?)
zJs^#Sl9S)*bAnj9!qfYm80C18!<cKb-1|sSE#%DThbF=ISsAKpfit5MC><TF5eA7E
z27)+&^DP;9r#HGVg6gsbF5vpX(FoL#nH>935!E>pjOH-%PT%OtxEPcRjX|k;GQ#)+
zZcxt(PmVDGd)82Qx`I0+nxD>^fEqo8y26wBzJMxXLsN+R#HW97XH)?@Op>j*7}Ri@
zZs@_N3(jNl9^it%-h)vOQl?E`?*T46Cl`EGXXKs!--9s<tT4)x(VUTY`b1Aq$^m5!
zIZ%PADmHn(IR{vQt{2FX>49G0B51P0cQa^B0!r1$rS0^OoJ=ktEtBgkKn26(``<yX
zJ8J>1^&EbH+E<ekAXLK-aHT8okx_nfoh2to*K}SVMkPkx>H0p5qKv$g_gjJ-a@W!s
zBsW>nDjVDsnq2T*e0m-alMN&9<cC(E44`N&4=rIqWfgKWE)d@ALT{pF73b%{C6U@|
zNmWL0;eyQM>>NY`4Wx>9x}P7TBO~whem`(Ae87)U3go)!xBVC;l_9CCD784hv?w_h
zR4#*xPf#G`D%dJy<`(dBP5$s3)Lby|XH;b5ogV1V2ufD>Z9oAt{h<Jp%JlUDOfr)%
z*n!eQfUP?t?{t{}aB=G&z-R<YbODUA&;$mpy!`_h#ituUba79fV9Pc=UVw=cqEwR8
zEinhw_~hk6H62v8J``XwfOdqS?bxc!0#Mxu@rE+U!Jr0saY<2XBJcElfsA6%<{4CZ
zW<D?1^h7}>P_LxV9#qB}27w$o`JO$f)L?Ydnk?tQ3(7DX|ARa@`Qm?2-934uu*g(?
zMg=BbuBnEM@nCsjk;$@-!l00xZ0HE8-<}6EYBTaq?se2><elyq0;ygn-**I+V~kGV
zrUr(gllM7su!B6H$;$<@Wb!^IPKYH=)B8gq&5w=3BGWg7GU`KYm6$Fi#v}pF{E+bE
znqI)fC^P-0C=;l>JLs$k3MxJ^CQ!4{F@#YRl9eXQxmZHA<v}aY098ig=~Cg0KG0-<
zoG(C00Lr&j(#q51oy_P8O55_Tpir3pk%iG1oVKU06=%|7<efa<)fHr|yaW>{5Cq&n
zMa%R838rX9-pT!LmZG5c1jtO3gf=}sl2H|;%~6s`7ep6GGJ)If?x2Jt5yhAes=uN@
zh4S>{QH*LJCHfqUvLM=zgV6y*Pv8I-X74<7K+URXMo4XaFq%;x6gSg9MuThB>G!0W
z6u<&{F^slgMotX4GUNC10CVrgfNM*gSVkR?u^YG;MW^S*GIE1B6VEe>Oz(?j<Oa8|
zK;D1`4Y<KJy+@V_)Qu?g2Dhv}$}*XNOq{&e+Z?1hQ4Sm=3O;&_yp#QW3>kSR*Zasy
zL6U4yYDsAktbYRvBVI1v%^Q8zA7bR4p1{xeO~cH<%*?_72Fy*(jg3;(dAafub5m`V
zg23G(V<k`_5~#%F%*Z=^p8%se$kwyWjJlGVsD18<4aOHzIT;yJIj8qYGs@7v+YQZy
z&^|2dXeX9yv=hrUeY6uh+KC<Q#6s$W(N64WCsuc~6AKzh812N4c49%DxzSGSXeV~G
z6AKy|9%7x?2M4B$DKM>r^kk)`-&0`P!)j_^W@s>dz9LihbQ1waB@OfBL{nJj*1&+Y
z?ybHiXwH<8NrV~qoIYr-AB6umGB8Y^t<0pL0Gj!SPd+m-FbII8q4<Ae8uN59WhObW
zD#Q#dBZE9t1&I3J7|1sLwlb40SQVmE&B!1Il7`~{jfMi#Q&pH0bXXWz82A|(7*0tt
wF)%PpEr%+Y(!*VnUl0$Prb$dsozlalpIA^Z{fi2d38Um>9!|;Wma0r50BI;an*aa+

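The rename recorded above, Report3_handin_0_of_20.token to Report3_handin_20_of_20.token, follows from how gather_upload_to_campusnet names its output: the obtained and possible points are baked into the filename, and the full results dict is pickled inside the file. The naming step as a standalone sketch (write_token is an illustrative name, not the embedded API):

    import os
    import pickle

    def write_token(results, report_name, output_dir, version=None):
        """Pickle `results` to '<Report>_handin_<obtained>_of_<possible>[_v<ver>].token'."""
        obtained, possible = results['total']
        vstring = "_v" + version if version is not None else ""
        fname = "%s_handin_%i_of_%i%s.token" % (report_name, obtained, possible, vstring)
        path = os.path.join(output_dir, fname)
        with open(path, 'wb') as f:
            pickle.dump(results, f)
        return path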
diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc
index 77bcba4d2fb526f7d14ec3f8c1d6ac95ddeae116..301c79fcc3244747ff167131505a756d0f54b54e 100644
GIT binary patch
delta 374
zcmaFBevpkXl$V!_fq{V`BwRhwk7**GOuZWe149Z!3S$dH6hjJA3UdoX6k{q=3QIOq
zky9#j3TrB33R?<WGt+E_xlGNBj0~yFDeS2XDQqbmDV!->=`78R%}k68Dcr#fnmoyj
zAhV$u#Aaq-U~pz&U?}EcU|=X=C}FH&Xl86?3}(<|tXjmSprD{IS&VV6swUGdmYmGu
zl3N@_sb#4}#i=Qpj8Sa4l_0T|3`HPQe#KAz&gjIInO9s=RGM6pUo_c~Ns$+1M==w~
zLe?VI$@xsOd>jl63^fcjjN%L+OD4}_O4S8v*JQfIn0SjZ32YXGU}0cjxW$~9k^<5K
zF@(8@W3m}@h&M|SD+2>V5lG?|M|^x<Vs2`D{4LgkqRhM!h-Q$P5Ej@N4x8Nkl+v73
RJFtU5`Z(A*SUH$P7y;~gOM?Ia

delta 575
zcmX@e_JEx)l$V!_fq{Wxe}a0V8S_LwnR+h<28I-dD25cq6s8u2D8>}#6qXi-D5g~A
z6xM8}BDYkQ6t+~R6!sMMW~SK;bD5eM85vSpQaDl>Q`l2DQ@B#N(^;Aso0%9HQh0(H
zG<mDixm+s}a|?1(6^ctrQ&RIv6q55(QuP!<GBS%5(lT>W6*7wz5=%?+a}!H4lj{?6
zaw-+lQ}a@b5=&B36w-?Fa}`Q5Qo$;V6*BX{oXou9lA_Y&lKdjwl+2=35TlYSnGxg@
zC<d{a7#J9w85kIfc^DWNN*GcYn;DxJQ<#DoG?}XwaVaP$D7a^qrRFIlD&%Aqmnf*`
zR_0_*+}-W+OWYIW-dvDd(@OJ_OEUBG6q52&D*aY6-V*l9FM&&^rnnXr<rir(MzMi3
z7niJLC}Lq?VE7d~Ih)am1M1=8$=ewfZ9wA1ASW@f6|pfeFeHQ7Q0Ic!AdJJglLeSk
zjX_@WD*~y~WV*$ec#AO!Y!`%JWnf^q#hjRu0`e=w8kQo?$%~jm!dP#y$H%ASC&$Nw
z?JeR2DdmWd&r8frjgP;@T2PdkR{}8w<VFY!Y#xVAZhlH>PO2Tq>S9nrVPWRr;9%om
H5@7@Y$R3Pj

diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dfacc25f48700797f5286eae416edaeaf0e2b5b
GIT binary patch
literal 833
zcmYe~<>g{vU|@)L(@eBuVqka-;vi#I1_lNP1_p*=Ck6(F6owSW7KSK>6s8pB7KSLs
zRHhV`Y^EZ+ROS@cRK^sx6t-rj*$i`;ni&}xQkhfOQyJ13QaBbdHZwLeF*2lZ1~X`K
zB{L!!4`MSjFfcfSY~W#FU?^cIVXR?jW^85*X3%7;TEwNGprGKMS(ciokf@N8SzMx^
zo?8i`)fI|TOG=CKieci(`FX{e#U+_}=?eL23MCn-3dM=JsS2q%sky0nCB+Jvc?w0T
zWvNBQsS5c;DXB$z3a)zTdSLUR;_)DJHKGl5jC71+H5H08@=J44pw=iv8|xV97{)4S
zB*JXeglN`ey2S$0c#8vSOo}FB6dS~mD;bJFzWWuUpOK%Ns$Wu?k*e>KpIi!dk$!1j
zW=VQcVoGX!K~ZK|Vo9ogYDHphK~8Ehj1ixbpPZdqq@S5rTvAk;T#{d;pImHcU<@{{
zJijR0P_Ll!77x^7u=_zFTFe9rV%8#1+$DqAAU*>F1Bea6;Na$9U|^_Ws9_Xm00(sv
zG?+`u^A+++bCXhwiWL$P6BH8jQWO%B5)?`@GK&?`O7oISGV}9bfm58BTaZ%;3!{>Z
z)XXA<;?i6zaIo4bB&MWjB<dt-D%dI{Y9}dx14EPP7GvTq#w0KYLa;C}Fx+BJOi2Mb
z3F3I>A`tr)M|^y4VrE`^e3f`)erb_HQEG8%P6;T2jTE#Mj1+7YUV{8nB^CsZT2SOC
zDir4?=H!4AizZ7ED0ROCMZ!x^XuM=+U|{gmWB~^a+={%!+|>B^TdV~|nRz7;CxLW8
dSYXF-*yQG?l;)(`frA(1G!AwSRt_c+MgUI{)#d;I

literal 0
HcmV?d00001

diff --git a/examples/example_docker/students/cs103/__pycache__/report3.cpython-39.pyc b/examples/example_docker/students/cs103/__pycache__/report3.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..056c94d504a238f8aba881d205b0fdfb6f498431
GIT binary patch
literal 1053
zcmYe~<>g{vU|=xK)J(KsVqka-;vi!d1_lNP1_p-WI0goW6owSW9EM!RD8^i-D5hNI
zC}u{G7*h^QE^8DkBZE6b3Udle3quM^Dq}Nq6k7^I3TqB~6uUb^3R?<$3quNfDpNCa
z6h|s&3P(0mQB*2xDn~OzGb1BI3TrTfCg)3#&6>=&I735Hi%Xmni&Jm01*I0`7nR&%
z$;eDe)nvRSo?4ceQ<_+k8easKh%YWFO-aowNd{>}#!N7WOE54nq%uS?rZ7Y?r7%V@
zr!b{3w=hJpq_6}tXtLg74NpzYHmp)m2+2rQNXslLE>S2fO)W0T%+FIu%P&$WPfg8M
zFw|2>W<)X_WH>7W1A{XtWDGzd!<fPZ3Yl6)Mur-O1xz)JDa<L%%}fgse3lee2tS1(
zm_d`xuLxwICgUyU#FUhmAOkb!{Qv*|CCFG!=3D&9#fApPdKvk-spa`a*@i{z3=9mn
zcoK_?Q;SMm3riDooHdzlu@tA~q=BVYG8FMLFfjaz(g$hLFDcDP)pyBHF3nBND=F45
z&C4uFFG@^FjV~z5EK4j&)laQR%q_@CErv1TQ}UCuQ;YO7^NLG~N|Q_Si}b-R(1!%G
zv0g#tEsm1Z;*$8ploU3Qw?PC0BM+khV-+7b4E12rewwVeIO5~;5_41I<8N`r$LHp!
zl;(igJn``$r$c4f<3YB@#~1N4FhHCS_5+0A1Brp`0HwfU5W&I7!dL_n@xvY23TTm?
z%96sA#+1$&#hS{N&KSj>!W_(?$#RPwlB$fW_??3l3=IqwAbbVmmm&-d41Ss%MFJq}
z1wn)mDBxL3GD~t&!M5Mxgr$$-Tdd$Py2VqFn4BG-nF~s(#SrI%A_Kw#djw=GNT?X(
zevn7F7>ht+pm54f%*>0A*W|h-3XdiuJp{7|ls1Z(K~4dM!Yx^Z6hc*eMruw$YEiLK
z5h(qD-B|=m_+U#Qo&i}8aS@mWa@H*l8%WgHfzo0z4+8^(2qO=Z00#%704oP00Nkwh
AzyJUM

literal 0
HcmV?d00001

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f18f8be0976727ab41dd1114924b8483c6d3aac
GIT binary patch
literal 1246
zcmYe~<>g{vU|@)L(@YFuW?*;>;vi!d1_lNP1_p-WI0goW6owSW9EM!RD8^i-D5hNI
zC}u{G7*h^QE^8DkBZE6b3Udle3quM^Dq}Nq6k7^I3TqB~6uUb^3R?<$3quNfDpNCa
z6h|s&3P(0mQB*2xDn~OzGb1BI3TrTfCg)3#&6>=&I735Hi%Xmni&Jm01*I0`7nR&%
z$;eDe)nvRSo?4ceQ<_+k8easKh%YWFO-aowNd{>}#>_B>t1vJyq%uS?rZ7Y?r7%V@
zr!b{3w=hJpq_U>4q_DOyG&4rArLYAvXtLj84NpzYHmp)m2+2rQNXslLE>S2fO)W0T
z%+FIu%P&$WPfg8MFw|2>W<;`yiGhKEm4Si585Bwepip8=VFHCxEh8gC4Z{Ma8pagn
z6y|28g$O=N3M+)4!Vt`$$>vuCGH@m1E#}0Ol$Rg_Gw1yO|NkY(*p<vt{K>_J2F7|B
z`MIg(`9;}=MH~za3{gCZ#l@*bC9Z{~i8;<InQpNZr{<)ArB*T&@i8zk{7TjbY0)n!
z%}CXE$xklLP0cGQ)-TP=EJ-g)Oi7I|D9S8LEJ@W*tw_u*$Vn}RG2&D5le1Hc^fU8{
zONvU9OY)2K!7k8;1i5j1az03XNvd8!<t>hq)Z&u(#FP{^kpDph10xTk0Am#&I572K
z(zwG+fk2pn!-xeEMn&ul3=Bn_Ac7l2@PG)AXTd%$;%8uBxWx~%GCm_SB_%Zvq_7y2
zju}{Z7^_54ods3nr^$YcBR)PaF*h|n{uWn!d~SY9X%2|Z6CV!>VW<pyJScGD<BJ49
z7K02e5(KdjP5`q&?kEC<1xQ6PD7rWpSr|E3nHa&Mei*406zAY<1_})1)SAkY!j#68
z&KSj-%9hR;#h$_(%%I6~iye|JjH~#agB1)73>6@J1>=_@3=9l@njA$!AZH1K2$0Av
z){@MUoK&#wQJk<uU3`lb9I;V61&PVo@tL`xtX2#OO0Yk`1lTiT3=9k)(~3d90(pju
zu?Qpv3bowC%)Iz`O|B?Wc-k`3Loka#o+|?7$0AU0M9Ctg5US!cQgaGYi;9hkK#3mg
s&LT;W>p_-4JOg$Nf&e+|7KaTaqU=B!s2G$QL>PIP1o$`@1z0&40YO+9MF0Q*

literal 0
HcmV?d00001

diff --git a/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc
deleted file mode 100644
index e1fe41d0d88015abab6229a54f2d8f5e6a8eb04c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 57934
zcmWIL<>g{vU|`svpq`k@$;j{+#6iZ~3=9ko3=9m#a~K&IQW&BbQW#U1au}nSQW#U1
zbC`3PqnH^%Vk|i<xhzpEU^Y_@YZNP39a|I|m}ZY+2h$u;9AKI=iW5w8MR9>??kH|B
z%@f4~rg@`yQ#rHvQdm>iQaQ8uQ`mc%qXbd~vIJ8&QaDqEQkYV>Qn-6r85zK0JSn^>
zd@20B%uv2S3U7)a5??5VH$@nUFOtHWA`0QBXs3v$NVG6U38zS=NVPCTiMTVQNT<lO
zFr>(&vSo=jGe?P~=%mP{$hR;?iR1M%%#RXL5@0{T{3;nG3Fb>hNr7pQxI&6z3qzE2
zic*Sl3qzDlif)QZifRjElx&Jxih2t}lw68NlzfV2ltPMDlwzt}s$!~qszNh!lu{~3
zmhu9XRMmxyQEI8OsY<D8%}k68DNMl(ntD~~T&@*~xdl0?3dJR*DXDoS3d#8?sd@?_
z8JWcjX_+~x3Yo<UiKQj^xrrs2$%#2Rl?v&pd8tK-C8;S2X+`<D3MCn-V1>mBnR#GN
zW?pegQE75XevxiUW>G4LQOWfZltBD68E<ivBqo*SB$lLt*eQu6sU?}Ysa2w`Wr;bZ
zi6yBDmHDMb3Pq^}`9&pqiMyB>7@+R6Qs4^7S12mYQ%KCoQ7B0*E&*GZ2vuvPz{O>!
zP*7Qtk)NlKSX`W$o|l`NS7I2SQlM8*$pzD!pO;gqP*|E;T#}ier(mRzUxdWdGsI_x
zf^MOL5gt3h@*pGp@=H<`N<iUPoLrPyP*SXrlAl_vke6SgkY1FSg6c(ukbF#09fi{3
zR4XLMK*GZ?9;_eiAGgdrP-N(Uj0M@2SfY@bh8%dHcq>RODpAN!Q%F?E%}*)KNmbA&
zNKDR7Oi$I+QAo^70ml}^Xa$H<6^c>|OEZg7ixps6OEMBm6rj3vz#4RO74i$f!Izd_
z1Pk%p#7c#vRFJdNN^|tM-13VQK*=C6FF92Q<S<Yof!YL1RGE1SF8Rr&pjax_PtGqb
zDo%}uTBHwAY8;=O4@#OPsd@#K5Emz)Dog<RG7;tosBW+`^AwUZ67$ki72ruwp(I~H
z9bsbvI0TAH^N_<?H&+4G0zHUJ47s?9Q&SZ(N=gcft@QO%@{^18AZbl6zbIYbSkF?w
zv^X;_T|YTDB_}g4RWG9?H-}4;If^?a6%w+U`FT-nsRfxi`RP&oY57IDi6teeMe)fw
ziN(cLOuB`?811Vx{6VQR6_yjgNeG@+HB$A`^{l{&OjDC3iZw5>D82X=OG#xx>Miz^
z)U?FXoRV8C8L2r1ReZX-#TohKsTBpO$t9^NQC!6(`9-PmB}JvFRSLlw`Q_kvgh}hC
zq!wourKTtpr52awlxQ;DVoOX0#cvTi0|NuhFi=dCmZYXsDZvd$%quQWErO&uP@$m!
zRa0EWp{rY(ms?trTBQY64KW#%5(|nl^GY=I^z<}As&f<bQoy+wq`it$SGORsxHvH<
zr^>_;rNjXVBeK1oLTGVnT4|0#c}8lULP}~<X?l8QUb>!JGRVQ8at4HX85kIN7#J9w
zK^2i3BLhPT!ve;I47Kbvj42G+97SF=>{(1H4B4CzRxM)+V+~_ATai-=QwnnnLk;5s
z<`l+-j4lk#j4@2LjI~U893?C@jLnR-j3ul!3@I$lOhq*%Y+39JIBFQOI2SS&*VM2U
z*OYLju!2RoA)+a4&CE@VHOyc;yK308cxstzSirm)HSAfuwJarkH4HV3HOxE=HB2e&
zH7qskS^PEZSpqf8HEbyyy<9O&wH&paP*=Yz5lrE1W@Kb25vl>xHJr6vC4wbFHC)Y%
zU~$0|E-)>;K%_)?f#^bpg^VfOk_=g5wcKELiPvy1WK3a5;b~=(WJuu^XQ<@?i%Zn-
zEM%O(Smab9n8F7(K`@2Ch6hwtlnAB>fJACIf*CXg{cbVpB{PD87>bz~7#LU>7#M;<
zAuYndz>v;R!w}0;%UHqy3a^Pwg)G4gD;YEyZ!u@)fzlU~p201~vLa9;0!;i$bhe5K
zElw>ejwvb4NR5GK(wNe`%o1>!7hh16S(aFm8j}jCt%_lc_>}zQ?9`$dXzf)TlU!_Q
zU>pO<w#M)R^A?v)PG)flD23XYfZPdkFdG9CLzOlxzr{oJ#6wNev&qR%PRuE`(?jTt
z5&;<i$_DX}#2*hT%&JuF6kLmn@{6n#LNZbn3W`#7VagOB%E0wwl|H;0NUg{$F40qP
z&PYwphU6b;(FG|qb8-?v`Oi(0^A-mz|KAb>W$E~WqWtut)Z*g!q{O0Itc95+skyh<
z%TkMy@{3b%v4iqSVouI2Ua(qN28VKCZo0(=$(y%$z^Y2}bK+BrimJo{it@`ci!<}{
z5_1%wVUn4j_lr^CB`C6A{{8>|e-$S@K&#@x)f}|81{EB|3K@xIsi68up)$2ZAt^OA
zPoXF^J+rtZwJ0@34^n+Y`4CS-iiKhwg@T;a#Nt$N<qoPSK!Ivi#cXG1SEb<zX;Ty@
z=B6s-Cg$a(7AYhagS0CYC4!`&trR^?$y;m%iABY!MNwQ}Iv!j;G8W!qEiTB(EGYst
zN^h}eBo-%@loZ`!$}hgfQjl1Zaf>ap7-V0RC@g%SQCkd-+gqHj70IdK>h2bMVp1_k
zOOZGO149ugq2J<%22?6E@kD{c6c+Fh9?aZZtl+}02$WrJv1jHLgG%QpzVy_R{L+%t
zqO_vK+|=SIp5)S^qSU++F!vS*$Z2_rxv94}k`jwk!7Mg#>M7Rby~PGeu|)zP_XvRq
zxXYr1Gt=`xg??^kF{ok!d!|T&fq~%`3pildp@Dykt0XZgCpA7Lu_Q5y70ixeN-K(D
zO$S*G&Ylp052RNd&A+hBq7TYa7NGKgk%Li)k&TgqQHqI!Nrq93i37}IW0GQI`p;#|
z!3csZj694SOd^aDjC_n7j9iQy%tas*FiI6xSo!V6zyRtnFtsq$Ff3qL$N(ypYZ*%z
z7ckW@E@Z4_1eJ~{46RHl%xO%L3~(L`3Xc`UOJfdZ&}8$&u!;><fGNQWFe!+!VB;9U
zB$FgV8Z)Fg(`3HISbmG6AiuacGbtza7Ds+kNn&PRY6>{8G?|J(RnINPOfVNhfC8|H
zpMim)9TZsb@<*~1l=rcfHbtO(S0u&2!0-~3W;B^^v4NTfsl~UL@(QBZ6N?jziV`a|
znTljVQNx%4u?AeKg6z7*3O2bFWE;qL3`|0dT#QAaSV{)P6f$Opa*Aab7#LC+q8L*c
zqL|tl(ilO#j~0$7=5_`ahA5U`22JK$!lAH2IV82BB&ZaWCsBeEWEcp8{8|i3mL&|$
z3|Wjdj5SPsjEoE^48aVVjDDI-w^)ly3sQ@2am2@G=4F<|$7?d*Vku6|Nh{I;d5A5$
z9F$U5GT!2hhqRO8;~`!I`L4*Cfq|h1<Tp^*FfdlhU~!fn++<deEuicZAAgH0K0X)R
z9FC8_#S<T2SelpvkpY`|ix+BJa%xTv%tmma+JSNy*ltS(28M2!-5iWXAbv<PD9WMX
z$e_)@zyNBEfLkB8nBc7so)V@kW>BD}F!wUm@}{sXU;%YlTNrA17qCLo^+Lv4z7n<?
zz7%$Gh7xvh25`;Gvw$OoV<BTLe+gR(XAM78lp%|=R-lBdh9QLu)EcQ_$l?aKXliN%
zQb6qwZgB>4hFXCVwiKQk0da;Jj-s9v-aMuhK2WWm&QL3u!e1*?!n1&9Aw#WjCPOW#
zBP)<1*usz^Si+md*UVfiS|i%bP%Bo#U&D|k(9Gz<Fo7}FEQYC8Jcg-OqE<3Rs8$M;
z+p;-}KGcY(2#Yh+FlI9seJDIpBep=WMq(jjt#pk<mQbxsiU?F*CXGpqp;oqptwuJ5
zFGaK$)D;!0k%$*g0g)n^47GAKvL&MNVkP1!;w7Rf61_|dBx<A=GS<qcNY*OU$Y)8`
zD%L2LNY%(^NjEb}Fw`p5NS4UdD5Xe=Fw`hYGc+@bfW>MgYm{o_YGhKR=djj@)QH<K
z)F{<R#EaA@f^Cz5xMw!QT&7y(6xkYyERop^DRMQEvl-^H)T)%Q)hMUP*9etJ)hL5(
zDUqsC0ns81;tVw^knk^IOOXY~j^F~>6a|ENC}uIIGc9CfWGFmQB3C2Q%oxL5t6HmA
zt5hOWB448jYO~cUg4s%7wo;8^4MV(ejZzImya=?#%2NZ1eMNBW%RpkQMp%NOnXy)_
zMlD52ilJ7$My*CYMM;D~f}vI;ML9*Kg`rlnge^rClu}aE#2IQdYt&QJo0-HJQZ&Fk
zjTB8VPYdFc8jugPQ}{|0Y8bLani<6zQsl%Lz&v3P58{U!r5eRFrW%nHojFXkS~XfV
zf@z>Z1YN%<PEh7^2X$_vSW@!yb2S;GSkm(IN^UW7p%e|Eyb3DRgF%@`0ansVK$?o6
z=6N%yan4f2Qo{%>XBokzpeEBT=A6{LTdc(;MVSTg;xz|S9)NQvIAel}oT(tQK?NSD
zc_+Y7rHoc*VJm8@m=wyY1XWY?Rde-KBNbFVtW<riRD-KHgG&;NN>Wp-6u_-4Q0u6Q
zQ=u%i2*gsT;)ae|WagzSR7q%*7N;uc>Vmpp3ZSlWKDgOXta*#!WdH*M10{uSl`c+C
z1g933rskDoCgy~s7MJ+sC#IwpC4<5lRuqA18c=a83<?)e$qFit7chb=PNov38pbT<
z1uP3e$%(m!DT}p+Ig2fYv6qpNp#~JWOrS9v=Iqo;P4*%yP_ZnKo>~I3$vLq&)eqEU
zxy1r%S%b>gl+5Ik`1o5a*{PMqw^)kvi%M>>X6B`&R%o&m*)T9LMDc=#Z9p2GLEYXc
zE)X{!RQ47}34qwiiN&e$xv3=?`6<Ok;Nlh$wLT0C3`;>?-~f4yk%y6wk&m&;j-+tW
zLwCC-YY}Lmvd9rsv^#+akTpf1QX460fn5ShU$;cCh4vDVt3V;n!B`Z;z`)=JuCuH7
z17X8Vs+m>_s>PuGPikHYI6W7E`XHK2w>Tk9_so*YC~ioD3_Kw4@)bC1fIQ0Z!CsTK
z$N=OtLl9vEiUU?qyTAArYe{BFPHGjOmX?+R+_J(VNd^XnUyS-ioS^PI7eb<n1D38|
z+JMp{$jM+U7#J8pY)~SBXUr0Y8ir=Z6h=^qF3wQHkiwMBTqKpkoX3>HA`Ys-S^dBk
zf&F@mvAD>SfdSmbD=sP0WGXTSxitzzfMXC$fJ2bOCMPjBDJ9WPgOP#ZGsyZXMVxU6
zQ(MLD3>^qcu~MjFQqYKE%gxLKjW00iC`7RprzYp;rTk*ljA9MRFG<V+H7!a&j41Y$
z)RM%^oMKJIB9QNHG3VujT5XkSndv#HB~iQ(Ivz3}bBiS_KQr$Z7r4Iw8pSCtQUj%0
z?)Z3Uj*pMO#Zg>Rlm;5IED8ttP98*ffZ~J&BzTJh)MEuD<tQ#tCV^zfTb!wRrMciS
z(<n|@hpsqEG$%hX1*EFjttdYi!~kUtNP>jNbrg3fxS0hWo4LhX7!QtfP<J@5B#Iv_
z2hs^?hTUQ=D$R@H0NGz0RGL@h3UU}*DyVN-4Cw%|B^DHb5+SG+7{!uXT$)=H4T?DJ
zB2bG3sV4#I-V}j~j3QBx7sWsXsCRLT58P`66>{-uIf?1F*orfAQu9iR1VJi59gLy`
zP@wT77AI$B#wUThY_}MbqBv7Z;z6Z%UV0HYUFd;SG3EJ0v4g}Di%Ozci<9$<Qo&j1
z78@vRQj2ae=7C$c;0zGOQ5c_*nUa#47sXm!npu(>#a0LojiL;&S%vYpn2S=2Z!uOC
z!GrG>Q?cPKredQgrt;z_rj(K>c1Xf0j$((Tn&K#yf}En9B3+R4!O0X%fYNG_Dgy(<
z1W@KK0HsrA4n{sk2}S`%E=C?kHbxOfK1MMn7Dg6E4n`JcAvQJ^2__aM5k@9PE+#f+
z0k9f2Miyo^Mj<8^cQ!^ob`eGqMm9z^CO$?XMioX5Ml~irMjog-4n{5}0Y(mRzetBk
zjggNLY&%Lp4obnGJPj(HinSO(ZORhH6vk$z8paf+W~P3DTBaJN1xz&zp!P;MgDFEG
zLl8p*q`l4TcZ(&zAT=+Fsk}s!`4)S6Y6-X>%$!tZbc?mTD6=G0lR1hbt2jRo6ic^Q
zOY*Z*^KLPwAr*t5FuBE^R0URT!^pr;460ff7=;*%szK!msJj9RC=dpv2Dn>SF)}b@
zGSo8GFo0Z`#ZU{XXc)7YQW&$Dii}d2ii}E_vsf0e)-Yu;EM%-@Nnx&K%~L30t6|Dw
zTEL#dvXC)_Rg$5ZF^eOM6U3@zNnxsGO<_%8$l<DG1Jm5K>^1D*b}UB?+XCJijuf_q
zO#NB4oF&W)_-a@{<9M+VG0e4GwcNElH9Rg1u^zR&HM|S>Q`i?W7EJ)#FHpl(!@H2F
zmJiGm1d~uPFq;)*&O)YI{u<5&LN$Cf{0o^F844#9HX*{ghGT*7LQwyc!-k=TrG_zu
z(}tmjwT3Z;%LX*Y#{z0E#b%(mBMjk=35-P&V4G0vu3-V$St0_Kfx0J!r<ti=s8*mx
z05rl^!&t*_!+>zb1ja&%35<nYc?MuTf;G%AGisRe>#Si6X3*sIyTu0SAi0HrD-lSQ
z1*(IpnDq)OZ?RWp7Jx<vZgH377r@5sZ}CF(#AoJ$=5DIEVSO)<4#px)F;EKuq$@tY
z$R3p0z!fj34OpZIDyG;|GK)Y>0Z{i0Vnlp=ksL^dBR(E9>=GY;ixtc$ss)KMW#->v
zcd9H&E%x+}Vvou!aLdd|y~R?Vn3H{rBQ-CjxID8YqbLcal)Wf52V_D~1&GIyo>~I#
zM;DcXxGY7fi7A?bx0o|?3!*rSQj7CTi;_X5NN!OT$eDE@q8>zmtSxE-6*XK{nFaAl
zrD<uYMYou%G7E086y@ia++s<|EGoXm3i4#}Efx?J#hM1T7&I6i#gvAToA}ZbOEOZ6
zpkY?*0LoOLW(Wf#3#jN|5n$wFWMh(HWMSc8=3wGr<YN*6518;VDlkeh@qh<YIG7kf
zLnw?q%o2=Zj0)gFMU1g%3Mfm0s&!Cy1YwjcxeYWk%UH_<s^(Lez>qnGrI$5^sg}8x
zrGzns71XgQVN7AGVG;p%K}#4@*dbyiOf@W^&I%(#33ClY7E3c@3P(0eQAY_QWM~>B
z=E4vwP|I4vn8H=Vn#Bt0ZWhLsFs5*WOsHXCVaQ?wsWN9^Vu)wUWT<5Wo5Th&X#!&r
z$OSwgwI%E|3|VZTj%RF03{x$8Eo%u!4J&v^o}-qtgfok40e1<{0^S;iEWU+|HJl5X
zYPo8-YB*AOK^ZTLzlI})&xWCfqlT@9-G-rrF@?W|HH#nA=>wTwxUPn+hB-w5lsQ2|
z@)H<~<VrXf2rL8_R#|*m{3(Ls47J=f%qc>W3^7c#JhdR31VNqb35-QPDZ(`jS%TsW
zc}yuHwR|a}HQY5kHM})^X)Iz4wftb$34t;vI0FmU@TZ7DvTcdT0?``g8UauNg2=W=
zp)L+`9XRy(L19<Im?8lZ0S}!>`W1nO!`wnNS&DK%=@L;v=YqJ9=AUhq0C<WJX($(*
zjjN2EDiul#K-DX#*8&=D)dQFH;4x|)@W^UeYEfodC3unul!%M1elglrc{vuPDkK-B
zCYGe8=;!9AWTs`NrYL0QrDT?6rj&x_A{CM<!P+zP6cW>mQd2=QdkUajsNfP3TC4yX
zxmPGE%}Fh;lJU$-&M8d+IRI<{XyzE=COtj9TN03A&G@3!oWzpMvebCcKs;zb{1ywS
zfT&^twRdl^Lo;d=dwfP>UP@+OQ9LMXGR2pHo1#_1s>SiDneq8)@v51s#SlN<Vk%Fn
zvIot$rj~&Gl9E{h?x`V<V#7zHAr1nE0%!y^2QtfTrBJ0FlCO}Qm|IX<oR?Yx_F#T#
zi9%vtC1{p4Gc7Y2JbGWOrzr-m!@-S6@Tf1iJr*T|2s2o|FKPriod+}!2p38R#U&^c
z7s1QOqArk_I!G<30tW>hqA0z^mY!OYT%J+{YK?-+JxKY+R*;#T4JsK^N^=V|1;OpS
zTU`02B?YA=@hO=_MWDtFr1=ACk`?uV?3f8E85y&qSn@J+3yQ!+;B=6+0tJbcprjL@
zUs@6m8j+4-gA9CvTU=4>WzZI0CCCtP@d<8CfeCQ)K?+*nK>|0vBtIS=l4YR659D}I
z;U@-Z3o-IALq>=}qyQ5OBdCoe#K^-4lILRNU=m{DU;)uQOgxNIOrYYD2R!Bs8dWY@
z04f4OttL?M2g0DC2M`9gpg>))5{4AUX2u%E62@i*a7JTVz+3`ui7W(-D>H+N$7aSF
zX3(5qFH<c`4QL3I#qSnNYDH>th$a(g$^{%Cx7a{FPcG49yTt<P&fH>8&&f|p%qcFK
z0}4u3P@omxVod_oSGU+f>n1W%D>PY)W`Jb*APL?u9+ISs7J-7AsUQa&SLh8eA&8=Q
z$n<(iW@1U|3Q)*Fy2S#F0*nGoMXMMX7!qrDGcaU=D`SQHVlFOa1s8w65cLuTs914H
zX<C{>Mru*2LUCqZaw=@@31cY$Qul|8D<w5eAvrNQBQ+k}4AjU^%1TWx(E*np;I6z5
zXm|uP9c3F*RGO-3#igL20J9P_6#|~Uf}}~XR7#$LtwMgW9;lwwgSG)QKuH^HnkHCH
zW*T@}38pF)G<{aAk&*{B9b`j(v0iR=N@fvA6s8B#xlXlG08hMVD1{egmVn9}u>1A&
z^pudC4Y3GZT7lZh2p8xmsFx?H>nLb~=AF_Y-T_&N<`+Fs_Ckb)4rqG20OZ%q{5)Hw
zoT}VJC5RQ7X|Mo-yUQg%FI7=VlM6X?p?*XJ1jJ@#L_jOpDkSH{BOIy;T4w?FT6{rK
zYFcK6LTXMiq|gEfB{)=yQcFsU^5Ef(VGD{IKrJIA^C2#PrXDo!D<~^?LXsLdK_#Uo
zCxS-9AWKqU(GOZ5lwSZYQIXP=LP=t}LV0FRjzU^uW{w`%nUs1nGYz*FA>IH*VM$RX
zf&&u39{oj0>gbUVQVhb-;6w`yy`22S6pb|8X#k`(6+Ew~0H0n2C9hOe=RlKfND-)}
zEy-5^8LN<}kd&CB0Leh$#GsH^f|-y(Hh?hH2+uTdsDo0I0%!yvv8Wid9409h*U~OX
zLm(M6#adJeQks&QlUkChkXfRmkPm9ImS+~HDx~C>=RqBnms$==*PwAxXhH{B55f>9
zq(V|ZNC1SD6`(-}@dz};AZkF#77-9ob$<DIsa#xKTuMqxT<Mu53W+Hx3VI4^Y6>7u
za(-@ZW{HAsu7Z+3WYAR!CRI>coB?7h6l4~_OStsR67AyR3@d&8^vn{6^vsf+#3VgX
zB$k$>>ZO#F>ZN4smy~9t>Vrme!0Qk6K-TE$mKK0{5NAMh0!R&L#DxpW&nwL>s077k
zUI9!fzZk|Ut}NyPmHrBu`LJ9OTvC*om+t8gQ<9vYlarbZYVW{xK_`o0DvDBJtda~+
z9?i^4hYJ*@<igpRxp0M$V1~(LmZTPy<mcxUb8+QX7Uh@br6|}cK*r@1Dy$Up3iQB&
z8Woxf3ZR$*uL(=h;{uHrp{Oh_&DE&T<jPIU$<HrBk;yC20}Fzr!IegFYDsAUsKF7R
zR0)~R({R=SH6cJ^(1IM)yi>4MaMpuP!Ntex#e>VP_;_fs0Mmd}XMhInG+^R73LrBe
z=|~w=Ac9ui!Rn37Vo(JEQwp_H8I&ABrA2OHb}Fa?2R9313N-z}bU`fO;sTG;DtLg0
zqacYDR5NB|rle|srZAy~gUp5+0ICgP+I;fU(^HF}wFOiuNIR^{3^qbX0n)>S%7cso
znXFfmT9gY~{HI{6P+VE8S6q^kUs?hV9=MX6{B#9dsCF)}6v%W)g$*)8M<F-0xEQn!
z5aM*WMwr=p5VbH>U?bp)a`MyB<-rO-cBJK$7H5E+kK}A+$ZANioC0KcAGB^SFEup<
zw1^b3iqIHbYbKUtCS~SimQ+IO0+79+g)Okab51NMDFtO24bZ#;)Ebc8XrZSCnmyK0
zfGl>gbxX`CPSsJ+(t^w?L$!g-1_g*7M3sUqR5{o`U`fynwT32qB3T1!D%>1I@W#id
z=7E;<#e==CqmT@CXDVoMO@2`^v^AultdNqQr(U8^0O~Rnr9+lKf?Ht---2Bf4~bYj
zu?!lh%gjs9%vZ2gfE6MdkVFe|dS;pe*dU145D^WMLnIspTLoxp1S^J{i7*GXIL6S8
zstpnxAVry=b|NUIU?G6!g4CQey?D^tAW*phbx&$h5j^BUW<Z2fi$F>cK2%Uv$W6`7
zFRE0?$Oo-YMoavt;SOpgp+p4S0%*PjxgX+cNE%A504+s;#vwSi(bGz5MM-L23Rnfm
z2cV>wnGc#;0*@+bXzC#*5HvIq;h2(|1BnoL7=yg1tdLfenyOG*pirC-TH*xu7~GAJ
z003ohP&$AH8Hfv40gGi+U!#RJxR`(hBgnfTSA&8YmZJhaOHy++VD*?DXg&cn69M%%
zc!*uWRsmcCgPaVi?9xg>&31%PevX2zf&ti7ptaSxpizQ&(2NL3SkJ@=ECXtnLTlNS
zcxX=&*<kQ;U!>J_P&rWB5|pz*3)4XJ4yi?;1_@{hd67bTY6*1RYB41Ll&5B<XOt+|
zDi|t&ik(yis6alX1yl%~lhZ?va>(ivaD?i>mxJ4aJg1`oP61fU4OqyR<mV{ZDHwp4
z6N5t+7CxE9;4njr3s5A&RM<iiL}GDqYEg-gQ4n;%0~}bOkWS5kTasE_0t#~y&2$AV
zZh?3eWFFXgpb8S6njkeXdOZtLs|;~ld~$vfXbnGfRUA^ZVV8ir9c&jYxD{;S;fmxM
zf@Len9Uu%31<-sasOkX44YqIv%R@sI<WGoY;EY|In3fu!o>~H#Km$iMG=U@AfRaQZ
zDL)gkbRW}7qyj__Qy)Aakd=W=L^ui(@qYP5xnTD|It-u;prZg=L#U%rlAi-ki>R&z
zr8-+krbG=0ummLTO7e5yu1LvDOH;4~HSH3UiZv88@(T2z3&=EJSyEFkEhn)=7pw-d
zwoC)&3QdTd0-`j_E6Rl!S6o<B0x}KM6jT7)qNSx^q^Sw<B`9pcDFHkQ3D4jltwp&A
zC7=#|QLcg=$g`;4f%G2Il>G8b6jDK{89d+y?n&mP7J>E`C{&l^=hRv&<mDITf|i(r
z+>lz73L4QUPb^lb&MV5T#o8rFQ*zA#4^rwvIy0G|@qCcJVufmuA$leGpcaOPCc1X8
zhahnW&S3BmM2@!H;&fZW&Ia}RL16?+q7V~(j0QzoP{5H6kdgp67%=0tKrc73LIV_l
zNNE5R!AKGm#I$X#wL(g2StfXpRRI#iNG98&M;0sr4R~DJ*6Ju!=b<|loI#OeA65wr
zl~e$VXa!qH&evDaFih38P|yaKAvnt!s60j)qo7G_>H?KsNyU&vhA5LDi40T_fgJ@d
zO*BDuGkHbIkSH_?ic(?aH>_?3ryQi(0=WW0si&Y#Fi0^BN`nZ)aG9m75P;a;0BU(b
zW=la0vP|%t4rmw|x;qcF9uBlk2ecM0J+%Zp^qL4BSWqa+OV@<7*Fc3Iq<05mfgAz#
z4z$4m_6%w-4%usopkXM#{1Q*lJ~7aQQ)&u$D~5&=sEq~Q%LwjVA(yit8K|*PyHXGZ
zH<IDt@(rXotuzly5f3fhKu!iVD+=;6^Gb@L=0ZoFVZBc9k{MWCiXsSe08|jtSA%jC
zK;e>@lLO;I7TUmg@E#~4aTh>Wqk~$%;Bo~Nq%a$xjcK?z$lYLf!4$;9{fHC>AQK=Z
zsFIzXokApNb0%c&$O>!rfEfl29FTUnMX&&fM>H!CZU?DEvd9&>cTE9enSw@Meo3m8
z0%)5dc$W!ycS%7}esXGYacYXS0%#aap(M4U1Uh@6iQRcHCm=Zw+Kh#_h@pIF@`L3A
zc(7rN8E2+J#|*3#lof&?+b2^%`2jTYl$%+hP?DdX3K}tm%tC;+*`}o-(mu%FAPn_8
zTA;#$6XrT7AL=*_XmWv2h;opKp=D4}nGf373Eppq8Vw-lK;<Dx4pSPW1nT0LJX>l5
zv_Jq^2Eq^v)0DzN+x|+5DnU~@C6Fz_pj@f|8In`42IpH)>%UeVG#_0ADFr}7rR55V
zc?#ebi<Od&0@{!?$O;e!X9`$C0c8qs{|6DeAT=-y9r7(HN=!~oN=(j%Cli<g5DjrR
zT#X)NwPAc}MKbd607xYaLv$!nJ6vtC1T3_2&@j+ZfE6D|nFH4Gf;ZfuDGyH{3`40c
zcu)`2tASKBAWwqK&B!m0hXpfeA2vALp$R+%HOWE4BPI{Y1L#Je>Hr%CGaXAL!gOP|
z53Cy|1=5{c0CGPf4PYw;K=~b7VxzhR<Y91{1f^1VA!`dWA5r(%Ldzb6J;di^TsajU
zbo59}5X;h(KqE-7k{dkR0rEN4{F#yu-5H;oSdyFp@AZOO7Nt2QdJ15Zpg9&1ET9kt
zVTgTL@-e{(1nCB0utGfP1f&LQ76D0sFig@ABmaQwJ<y6)aGL<BPQ^$(h{S>LA`vkU
zwHh+&hU6AQ__$sGXty<Za1T7h2XzC;VbH_}&CZbJ#OUq>O|^lH1z~sSnGdj7EzihI
z&H$AHCHbIz|DgB<?eTP~RDf<p*HK8yFUf#Z$>l|vB_*kOph^+6-Uv3ypa9-752+R*
zTf!5I70Od{azK-3P|tyfREy!oY9eT{LMrIwf_w#N?GIXt0vb%$0Bz7m^_D_XYEFK+
zrk;Xl9%Rx|p*+7Rr5JoZ0K{d8qbxFutq{c%=l}$Gu@&zK-scVtBSZ|9RDgyVH9+fr
z;68%3RuIM}CgqnQrO&+5+@#c^Vg*|Tus#?!4Lk)08q|Q!?dU=cDyb+zDvc581l8{v
zMX4Z9Rg~x`Kuy%tQLurfYG_`vLo&z;61r9jklE7290f=?gBL@A!Z5KYvp7Ew9_+{|
zp$OdMhEH2-D4{0}&;kI^`5~#Ww2}!rO#;+R10?{kA5fi-+%`v*0jEeEkOy=WN|3#W
zC}1Hz0V#n*A7<i)8Ukq~L0Zqyi~vg5;3<E^loC`GgaXg{Lt+A9AxIM>eSp0J3a!$T
zRMd=wNF<=BR*K0}(gV*@Ld-&yEy_jN)U8ph32sN`=9huCl0$ZTD<mZrA)1WPI99M#
zNUZ?1DoWy^A{xa=MuWzgvDC$ov{H;nDey>!h9uZmXwd*pU&wl()1UrDDWJXG;N3jF
zso<oo0XmQZ8omzT78zv14{Qx6a^gW<(1OIG)OgrL8K|kIqX3#<Erx1@gfV!f3es%2
z9@ILQ%;Xa2JQu{kywq}RhUq|R9MG;LBy+PtQ34*<1G^3+s|VV!gxnSb^;nZYixR<(
zLXG?EVg+qmg(SWB_<V@(p>xw9H^WVgR!5Rnk5#Z$h|W%}1ZOW0otdYQT@0F?25~^`
zhRi$#4Ryq}RCOH%b>xky>YB0O@B`TajV4eqgC@rbdmUaIK&L+3i}FhgG;&i*62Z>3
zg@!6}BtnA=JX)<_tDqE`2WF<g2l17_&H<eflL=b+84upM0a`==o@ND0mVh_)f~J2!
zYY0Lzb5j+HL0eB0^79lx6E2yFIhj?EK|GCuoW$hRjQpIG)FNwz;?xqzd@*RAtq6R|
z4Rovk_smj8VsSj^aH7-{u&wb5wh9QBfL9P8cYou-+oxb6VAnbn6y+DB7L`<jd7xwk
zRs$|OprHW@d{DCyoUy>N3Rnl=!5Ii{MKmJjKnCPOatSCwq(j!8rk8*cS~PS+2G~L!
zh4d0laAJW-LQF)YH&Dt+F99!!%gj@VR)VgUfNWS+(os-?&4eq(#)8yBmyhUW7K3A4
z17ter_@B(;;?g9Tkd6YRjezQ2P)_g!IS}e_)Vu)l5yWxe@X!M-Ey_#LQ1H}%P5jt`
zq>&~;p{W-nss{-pNU`kzF`zgVbSe&(IM9LAP)JdMWQ2k(L>6ikMpS|d7;wNMX@|!o
zND^N}LgXP6D$sz&HeH6|F|ZzGsB=IZJv5Jj#K0~B`wPig5MP0$A)bPm0Zxy3nI&K?
zNUD_;oD=gv?XS|}RPdMsDDS|xFentI=A@P-=0VaK>S;od)uYL&h*|~IbwRZ)J~1Z;
z97pl6*n|vkAvpk)JU}rAN+DpUgE+;gi3B>70Fu#z8Kj4vVNtb!(iN(DP>BI8^Ffj@
zjH(FTPz75B$f_G?brl3YRZIiwesGZnD%~KfZxz6NNWli(?guL&K(@hL4yrw%c^Y}S
zHYh_wat`L|ZipN>QDdzUi&BeIOEi*mkZV6sz-kog#A|{=HaVvl9&(t03q8_Aqfkdd
z9c+|3)Po>r!*HP<SV{xh2m~c4@R1<sDGHj5VI@X-Y6)n~Ah=S8C`C2^Qr5z%amWH_
zNCQhp0XnvfK66=~UzDAhmkwI<1vwZFX(1Q%cohxUaxK_mU~s~MxK<NmUa>qSvq%S$
zJE6G-%R)1d2|A$0KdRk`))I8G8`_vlfwtx#N5CMt5Zs6Z8K#GBt+s+vJUrvYr{u@y
z<(Gi>CB$du>7^u=z}mQ=_{dCyg$}3#1*-lr=a#{i6sMNJ#ve7H;i;nl4Q2?1>;u$w
zI*?^;Nb6uB$J~G;5w_G3)t^YI2BI0{Rt$GQ78rwuTk#o%$C)5gu{jka4$53<N_Gn2
ziA8ytdFlAiQ_@kWhNTo($^p9;Yb0vcg2Ebl7!IU&0zP2}lpZ0AkNx~Zz$paO`OM4%
zt?&VLXoFG{i}Uk}!2{2r)v^jnMWBt53ZN}1Ik5Gl;8u(tr04*70}*_nTBf8ZGr0s&
zz9A$*O3`~$NG*7TLR`TGG5|6JQjC->a}zW3z;OdxiJw_gX={iQyFn<YC_=j7#U+V(
zDTzfX3b1uR;4$|M&>(ACVqS73v<(Kb6FTY%QwT2EK;odKz^S0MS}+;#QX<G6Xr#g$
zRJIlB#DhvUaD>5&Rzx6yOoQ42T0{i50VG}wI+8)bRskfT2O=~;Jtjwm^!)r3(4asC
zq@4{e2Ed9yF<+98Y6fWgFlfCh__ze{>SNGI6Us(xjY3530@;h?sfrSiYRH~z4ai&%
zD3IYSO}(PhJPq(3WmGf3yVB9z4l)~#brrzgLvd^&*q0z(pgrw~IyxTgMOaw{j&h_}
z1_v!TEr1vCqDFaU9;9!ikX2e-0_v87)=`0vdIW7{g07V+Pps6_Lo^T|)nq)l08p@1
zsIG-O5)z>bgkv;K0lBh@FHS5=g_wu6$^tFBL?}T@K(Np%)Pv}bRtF^ma5oDQR&bZW
zR3f`Qx=;_fZw2a{!3;vthnnk%G^tuW541TfIU`j8G#-T-MW7T9N=50QgHnw23M$pD
zK<yJmKtgL@WY;318@V<BMJ~LKEryr2;64vh8Xz`eVE$LIh1E>O=%EC02P_nn!Mzhu
z>kH(kLOl=*tz`l!zB1Dk3iZGhGpL@+%!8W`8u16|!YUaJ(G!bWAV5q6IRRErI6(?y
zSjDRV>db=<uq&!mC@xLP1&um_o5k=z2Rj>9-Y8T<tf_@8-78H(NWfiItEb=wI+zVn
zPk{UZ;vtpyASWRlho#I%RRH4QDfdCr2-7fAAwmwM5?=m;I4}%Rf=?^hgSglBL0cQ3
zQ~-*+*l4hN2vZ#@sSauHqS^`Kfz@L4)uFnu83|fDtDywS!%8{|hNvYb#48|U3-t(^
zNt{8MX$ne+l@2LNpd^gPb)b1&=ls0l%#>8n!Nj@wWuQ6{y!;eVID#CAa4Lug4h7Iy
z7rZ?JwF5PiLBbTQ7{XLXavIJc#+X7tvpY>e3DTV*!f&8WJ_V^o8HokOpsr+5Dro32
zMS-9P;hqHBM95cQML7L~eYQgZ+`k27I8eV9bY4YfQfWylWCj<~hyxo7s^Ak9(n|Bd
zyJkQ`>nWgyKvH5-PNhO|Mt*5d3V5(2DHW*^19CoUsDXRHAYm9rl~#t%R464Srl8Iq
zLwgU<_yrYCFeY?69r_qAXjmUowka$4<`<<Z6y+y@Dq?8AQ%}LM05n3HSCUhykdvC1
zUXlS?*qD@9qyUPnlvLO_T3KRIW`1e0LRuo|kZn*skgt%O0XkF^RPuwzCqWvED|3_b
zbD#^P^S~$DK>DO0kAhT!TC$)4c9fOt5C?*k6eNRYI&$>VGV@Y2lvI%>VGBXazQMH)
zyy^t?ZjwtdS6yX-np;pKYLs*o3X)5-(eip`nnET>Be*<<SqD;^scnd59T&)HU<ZII
zQDpl;DW)(7;eL=ph__)QR5_4B23p@D2IwKGG$9)?(MQ*jV+}I-gDqac4gnwclbIZ!
zoS#<=>cfFXwLlBz)uVN_W9=0*?5)5!CPhmjMo%FoMO#5rp$00Xtq`MU4_XQbQWfo*
z8mkbkts83(Rb{UZwxFm&!B(Lt72K1{%t_V2Y9VOmL@&%W$jLw072;vg(do7dMHS$9
zNX*I60Qm`QAVelQEhj&*L?aQrmsCStPaRZ#B!cQ{rBrY^nWzcbRSG)AIT12OnFv;s
zlLM&^GSfgQRwFSd2NYwb1_p>|fK*j*8$q#xUJWY2Yid0NtDw>f)+5vbodH`3o8?ke
z(t%qB@gFoo6LWI7V8h;y$)Ga?0-)#S!Uli910bm57bT!(RANyHc&CmIWH`uHDYp_-
zlz`9N1<k@JA+@eRjW9h>&OqCM5?@rB2kO3}3_^o^3(){wqYf^)z(Eg}N&y*Wpl66G
z3Q9Mi;D+zYgBYWr4;l4HDM1=ug_T60qQX|8q%b8{4@78y`yJr1SiL+*NP#Q`w^R~~
zkUFOzQLypgUI_AN5kvqahaLh*-bKlXAhmj+RX2&Ci2(=$bWC>$c+a9nNn%lYY6-+m
zMWsltLei%Pu@&KBh+JlFYCKpb6>M^5ZYsEkgzQ}CjBE+UZalCFNKQsAyuh*$r$T1z
z5LRa*IxiqqItuCqNr^?Eoxyo2keC4RK~)i`_`xxv15%<_S^&ECKm)Dt0~x9ZmQT*f
zFHS}4?!aZi18RuIIAVj49(a2lk~U~w05z+i!K0uHaVSU#7Jv|=K-mG@WlhaVLyupO
zn?RjUtceM-gWeY54Dd3DOwjICTLVz;Oe{-<4GDsd!iU5+IPL2w<R$0l6x$ja80f%L
zJSYKT;-thP=oxLc>eV@*v#)EblM;(+YZRiZQ*#mvic?c+ZK{h>LEYWVy!6^wb#R{-
zv}`o9B)&K~F(=hFr3AvYRfit@1?qFc?FH3>;Qnh7=+XdiUs^9u*H9DG_;Ueo*n_7i
z@Ny>5X6T$sYlWOt(3#hu?3P%fU}>oe+TE9`P?}eg3A&>Kyiv0RX^<V{HRyOaI7VR!
z5m88jRDdv88Wi+;*pdT^3glsGkOT}T6{RL-L*f@E1d4gR;+)jf0*Dt=N)QPgqy}Lt
zB9U@&L3S*I3S`ha4?Xa>25k3od}eW8ehF9zR^s|r27`uI5F5(Cvf!~Gumm_zLHjw1
zON&w!%2O3U8$lsce))N+po3I$^NXN?25ysq_YcEnEWq=8pwye3tpO8*CRtF+OGy_x
z*PfeLpn=%ZTmhOZP0US=kJo{W+T|suBJDEHEy~vbjV*)10Ge-#A&mgg(q8ZsSv+WX
zBt9iI4Rp#g^pHx>ENTYSa!?9N&H>+MfHZmzG8i-gu9uk$*=hw<4YDXR4ZIl^?lMq4
z3L5H<j|cT*<Ks~i1=Lx|Igr&bP%fyPfjAUA{aFN_>(nUD&MdGsf?B1dq~xAjqL2vQ
z84YR<CMrPGg6spW$APRz1RY*env<#r_N*Sr&G{+t#FGFrF#$Azn+!Vlr&s}Wi9{}F
z=}S&(S!zx($k^hN#N=$ySQ1FBLU~4JPAX`79(dU|#6nnZ19f!t6hO*s4Ha@z6Z47{
zl*%(e<0d&d#R|EpN;(ipBNX9EsI!$6QcIGd`}Y(S6ddyuQgaJRD!~KedFh}67!o$1
z(}~i+1{I{HC_vo_>S&~<fCkx;vlTMHdj-L+f~|0Yc)}KxMnK22gDRlpY-phhu5!Q=
z+S*`a!5X3M9k4!41sjmbMI{Q_3Wl)QgE&S(9nx-tH0!}b&!9EEpdgC|n;RQ#5DN)z
zQ0O3<=egjMsud6}N>43;h-o011<A$A3JJOSDIom`;EqC4DtH4T<em(OotdC>m{OBL
zs~Mp;qvTYA(gCQUnxB&j_L8zfh`)=!MoDUNMoE5NX11o40!SL%|AmYd$H#+8TIj+D
zP%;KP66RwCa7u$UwIPm$CvuoeQ&LM(lS?3@kRTU>ZO%+nPzM!3;ASV1_w>@hXF%m3
z4KXPzc!J7W^%8~bynN8Q-b#h?MCeHs5R*YITFBzVj8p~4F*V@)mY)K)3zP$u6(S-c
ztUyO&fY+6!=E6?J$je7Ft+)hq{Sj0@I3X(NDrACAf677Zv4i#(lTsnWsz_Z@Q2K}4
z3i2*2bg+3c8WIMvNaYjk6a+}<fo@9y83OKe!!HZaON$3d=q2Za3jwfopiS8MDd2kp
z6clY0)NLTKVW(~dT9i?eUjWG$3Z=#16THBBKmh|X9p*&v@@bHO0%)8a-pkG}0$nQv
zKC2g!Zk3f49Lw@EQxuXji;_!o5<!C{X~p2a93<`Kq$XxV3NK{^oLA`>>6z>2WF{3Q
z7FFuQ%F&FH+#KbSj8t9Fyi8F+QEEwQkuEsLfgKFmSdA2p;AVU>wBm#2J|#V9tpM@|
zw0nmr*+Ho!9_&Vqv?6G!2|u$4>Ikq+3JQ=j1>$uS;=wcbrK#Yu8Z~<+7NtX$(}7Hb
z>Qe^QbC9k%xX8^cDh92-2MsNjq!xjPCBfn#0mMi~CM38(aRqXC3N&{>&4d;`ASY>p
zgBiq%Hi!kSW`HCiM2-YSsxl~}gK9Aa&^gOGP{SazUrDJ7MX70!ZIhr=-5?7pz@CHJ
z1~NebR4$f)H(f%PT!2-8hut7H>VQieNHT@!P=KyL0c%5b5JUm!fD!OgugsESNO^!_
zBTP|dF|4+SSNl5P<-wVG>7ay}Sq#d@3W>=jrHMHZbKtH;)D{raz+@5RC?wdz8<^?f
zrB~oeFg{)#)jM#Pf@)pJic-+dQK)mF=7Hp(EvAyn0?<$)bRx?!uM&R#3gq$+9fkB{
zNadZLS(aK_P?C`f%3iQODmX<VB`xHeL7-I<D8nl#*eZaPLJI(}b3mt$fto`w2@pRK
z+H?Tf0a`u=YSdR2q(WOsRtg4?Xah-uv_e-ofZ8=6AyDE;PX<MGJUFEkr4|)yfb557
z7;sF34Auat*96tXAO=VTHQqDRKn(-Xcpg|Kc)=3Lr1*FxXn=$Cf<#dDfuaqYUa03#
zwSxqa>I;xw7zVkj7*@Q2I1rD+q+v8Pe}NPfg9bC;RXR)of<}&ekSLT7)(XmznQ7om
zT?`pKfg}+~s~Ty2BdkdcT|o_M1j3rs;B%gXz-K@sn$@6lp~3TzU~y<PfO>MU<zL`7
zKBzb>N=?kwQ2_P7K=;vQ=I0e7%?(2}f}CB9z9YZ51iw~rS_A6@t%@ryO$P7Bf~Fp@
zB*;DBfgkX;Jm>-<1zkIEIe~~?@L4^es$VZ9zoZy6-~{G?mghU>APwa~s#u5|*lm!L
zXmawP83OJWgq=Q_#U&c*u0cWmLF&+zBH#@LNK@@#^U!s;IePlQG=NSfD=kU|O|!#&
zm6(ze4Dm2DoI#x<EBN%e9;lE7cY45QX<+j*EVz+O0-1qGjbL}dLlu@{QPn6b5Hto7
zXpp0`LFEZz*&le`0lunD2YQ|n!T}JS;K%`uYCw!Yjx|ttLll97Poub4M<F#Y#a0Pv
zkQgGbjC5KN##jYJ3AiB)DwU8%bU+q>urfH@!7LD=paCrn!Q&0!J9sofUMoq>1s74U
z-I&OmjBG)v3BKh>2Q)KK4qqV+k%Z0F=s*^L+d?SNOe=VwoGqw3nwXQLqX5}%V+*0s
z!>cepItw(dT&-TNZlwSYW5_8apg|+(9!+&C_<k!L1$D?FnyD%3Rtj)`sY60r-AVz%
z&{0r_IZxdR)Z5Vjn+*2>bRmGIj)FRPd<AATn1h&Oh6Hs<acVLso5BYb!6O48LPHbO
zv??mmQ7}UI3mkIDcaOoA)`8<fK|vwUN5NLX0<jeZG_(i`dt^O`bOX|b;R49mRB|%x
zfUA;J4NVkBfOH`SnlRE3SQ_LfaIOJ`4dkc~$i@wL@&PG>rd3dxg|1FPTLC=ot)&1u
z#lS!Z<TG6b69rxHNON(qrluxl3I~~qNT6UIhyb;Guw@&N95k9yw(5e!VVm$UB$O3k
zAqF{22c#H;i$Pvi3UCY#Rsz`vH`f-Pz95y366n-iP?_fHf>@~pG9WVzYO<ZJf`J~E
zO0F2Z;$2BWLrEK)?7;5ORM1vXQh*MnX`-444PKPe5}da|wG~ofiB$6-N-=~&)N&3S
zs~{oJ%?6+)_=sV0_{<#mwiZy63)GedsRbSKNlGe43H2-#9wMQDI_{vPlb%}Qg1FsN
z17sh<E)?Ym4rGQ+12p5L3F`5KA^?QJzC$R3q*>77R8XuHs%Gk`W@;*^7Aq;JDrn?^
zh8J`cvLF;f6T)(giE=|k&P30th;&Oh&w^c<oUCB0fHtdAVvA}eZ2APO0pUW3w~*YK
z1G?rG<bN23CLK`lqH80NGqGtw$*k}a0PLuEP;`Onn$+SFNdFboQh}`32?igj2TrI+
zNfZ=3AWJ}V`emSLS5Rtp23=$W>TrSDbZ7+}Xth>6YPcxaDxl_Gv{?m2mtRLg9p$bM
z&}cO*kYF^50$8eq*XDXC(s18{G-Iv|0g0lh2c<om(mY7F6BJf@3c;Xr%3ycD=@nGk
zDM11bq!)yh6);QypJ@h;Q_%W!(99PoN20g|dvXF#SfK?TEK#A@f=EmtCxD|CY+`aU
z#BtzQElLI7fQu5Z3d*2sWK$r6W1!{vpmL)Wayl*8zi?NQ8u1|a<)*^o1GPef8wzqQ
zTp_#`hKP__>wyCpoLu!lwt<2fM;!$np0rg!DiA;}fkYkH(Qvnb8UsiZN1#=g7!E@(
zOh9HqT%)9*q<~t7=A=TFrNZ5)393UhP(lQxA5_sOKx!IDaDbE{RW+bm4kDodsj;B-
zo02AY(g(5PEHe*!H@F^bIR;!MiltBo+kzaN=c5Vjyu+tEK(o22MX=6UJm?VV)SNt*
z)a3l4#FG3XNH-44ajwu!2jH$7sHlK07loS(@4l6SZcv04G~nclIxmg;1Uk^T6}E$9
zL7@o3klq{i(;;D<26$f)Y7In>Erdck_YNsHfeuQA_C-PY4eD|f3qj&~5Hl2%!E0|S
z^Gg-sp$?V7et;>sTc=>FfDzIf5N(JY5A9`xEkn9J3#ty<*F){~g4H6{=|V<d%8Ni7
zmx?sf^7Emd0YyV@VtQsWI9EXh!9E0)i-f0`z}6s2V$duQY<dY~3ar6`)CEn;&xhCr
z4kWPMdhwvkjpIR^+0yd!VLY%r)aPJj(DVu#YXG0=3rnex8N7I?=X4a}LGc!!om#1*
z5ag4eoDE61SjHc~WqwI%K~5@Hyff&ich9^u&>5o$aSbK7xRQ=Sv{FW9Nik^O8EAcd
zDuk6-QJh%?n#oZDwMfD2Sm?kjcm**iuyqtbV?!yB`C(`vD=8@{_<}+na>~2{)cIgv
z!>(UX&n(F(1s~X~54nV0KN(6V<>V*ngXSjmeKM2u;X$HT0NK|Eo8`%cxDI*Z6cpo7
zyTI{)@8n0&u1)A6gJ8eJXQpU?3MmaGFjomFtTjMmvN{ToLuaA>MOv?cG8YWi3|cY-
zx=FTJ19ayeYKsCqstg+KgQgWsH-du+l)^w5k_aKD7L=BN41^9^BeD=?O8{mGvhMU$
zux^AlX!--I1=|S~fQBG^@l7bG-~)|+AdgxVmw-f|@eSI(pA0(r6mpiKg0g}m@-kk?
zIRoI@8#2CITAW&hxL!NI6m+F`DtP`8G~WYj_ZNd!2WA$7PbCBGw+EM$AW3j&K$dMn
z){aBd0BDFkC^a!fH$N{2G|`xx0iA$`s4{{n12-CqbjzV98i0+16pci0?E+8nLLCk@
z79512C<DuZ4aF@3a|^^;P`U*lYY8<GoOq#O3o;R!!ob2PtxH(It82nX+Ykj7OcHy`
z7Lqg|^Vy)ikWi~pHw8db1P--Gjt9++fs%<YcrFR-P#y3IVh{l&zkrrUA+<{2p$6HG
z4{EeQv>=TILRW`?8v2MqB?Vmt#F8!W;1<{fND4$`Imk!}SPFT!185eY7-`D^)C#z3
zib0E#z}_y-$S*?aA|f&nSRRtL;z4;+0qk!*aMXi~6Zj~OCbS*{cUU0)16{ibY6XF8
zf;bw|uqjSKSO8rK2G$jyR+<NjQP43NpjCsgStSL?kPAxk(ToN=4dD>bKvq#Ix)z8$
z4wplga)ZW6AZv3#H(P+xqOw9rMrJW+Y(KXWyx$)j&5*eoJ%!+Wh4PHV5|A|LGJ(V-
z&{mYp5_@pKf|5DJ&P>S3%Sg!r)E@;q8Y%rE-Rcb153(L~U@1f$BB_9a60QK6WHpj?
za*_5^mF8s@mO}TbfT9Lu1d{F92Jn_XsICO5K*R#1ra(%MpxKm6&{PWi2!6ff{L;J<
z*qjf@ogfU+m~8;+^k(WH3<SwRvoy#Z&<!v~_-YN17RXW{5DSE%{>)CTM9PN5Hac-N
z9O38D!GjaxK+vWzNJ2*n2Cx(~K0z16rh*2skmfy*=cK^qf#N?tH4n662x3@#D&!Iv
zkV#0f0Sz12HY1QMYN-dd6nmKmmLjgigDM5rR7l<;&UPa_)*F#vKd2%AO%frmdxQoK
zsKN&MT}J`j-h+lLc<mrkV1ZQVVGUJ8f`g_Nh*Ct64-Np(NIRre0kr@r*FYTtDj`66
zp$-8z7)W#hNIN19VJRMTo(8CB0-K1GjKK~=WCxIDu-znD3iUQ98)oJ~i%B%!qg#rU
z32A7n5yD!?dBNZlh@HXJ2WT;7QE`ctf@5B#jzVs6I+&$ktDvW+r-?ZMl?kt?(2F=k
z^g~lL$V1SOM}%!=3R0kg$`xow8oGN|2Qn)S+AF01>KK5=SF%$pm2?y`Q=kb2qzP;^
zl2<g+Kx-pll^!HILCHBY1?oLbu-6by1RH>q+z<*8K?Rit?_~$6M{+ncreRx&VVlmO
z%RWF!9IgwI3N;l#?R_Q0{V@7UpdkzJ@E{_YfY;%~$Ad=7^$N0cpu=V0q6}$YBbs}Z
zL388ar3~Oj2;j9;3L2Susd|v=u_Rvsq*<XPA89j7GRTdvdI;IGh+u-oB+Q?XyHH>}
zn6JU+Xd;CUD26~+Yr*bD!Jz>X(jdk71~efKGeRHA1RueaUyz!o0k$4;;THJpC8X=N
zKxTt5<RTl$ZCjv_L&OO*eH!T~q(PIHvVv1-3Frt&@M^fkoZ@`&;WFSf1v+XjCo?&-
zBnNuU2sm1hDl3rTNC5#K1cc~TgtWUr%Y(sPVTHsT(4P28@EuK{jpN~|kcBm%`5%y}
zpv7V(<(bGQZh*W4Nqyid4#WjvWd)2~5#TM<DX<<f_~^Cr#A3Le#i=EF3gOVUQ+hrq
zqvhw>gKiW9nFqq)fW@CapqUqB703XHH$nX-B^?E@c9f(9O9#+e7@`2tbOzap8LfCO
z(n?7zNdzrt1K(8$-jA)3hOiK190-Hr1S7&hhfgP#B*McGvr7y08nj$eQ~+BB&M~0a
z0Uy4G@FaM#G58)I(C`5?1wf(+yoLiZWCR*Z2}(`R1YKZ~nU@Y94b(`>FG|f!M+^@_
z4S<b2fP4`IUU*cjkf@NHUr-5JI*YDJ2ef<^a=u<>N@`w7W^y8Ui&kn`Y7t~417uEl
zVx@)_sFe-5NerC6K?7NNDIl};QeY#LpbaeGvyDKDb0MAJoSeiYw5wAJK+CW4OG_Y~
zW0-qExgsyM9LayBdC5>ef!qMvp_h{bG8jb_Wce^60+kh9D-x5z+u1U+Qx)=aQWW6M
z1Ko2}0y`@%J2kbSSOIjZSvDwOz#RgpO3-><M7J#nJPv{y*5K7d;6dQTf`T0IF#srW
zp`(xkvK`dn1i2VJ)WNH{^HVex%E3DZ!RHL8fUX?_c@%_$kiyhf0olW#;RBG<5iW;1
zIv;VNjj}?JUVc$#I`r-)Q0f913dYI`x!`L{U<V+gngx#b63|f$h#^#HiVT93yvS~W
zFSW%ILU1{7Zb8$PlAjD6Y(x!Kl#xee1r06D@KkV#nv@E@v<4hWN*R^unW=f1$x4v;
zL^fNYxCC^)UuJQhdWk}DW_n&?Noi54F6es1)FRMsS{(&&{Z^S@iYPQer)_}JXh}Y}
zLQGUB0`02EO;rG$-3Ce<1&N@=k|n7{#d->E`9%t#m5q>V>%jX7G7^)sGmA46Ad5*e
zQ;YTV^pZ-!hi#RBdZD1@Igpbe!9zZY(7TvG-A_=|=qMEDD`b|aLoWYFR7lDMEiErD
zg)EVVhCZ?<xwt@uhJvjE&Y}S%4;=!9<d5R;%#sX5)=JJP1})Np&MQDO87w1$k_o)v
z$jO1OL<Vm+aP|oXUy7mN;_B=l<QU=~1Ukc{Bm?49kjDH{(C(G|;u1uzLs}Av(3T6@
zgbE6H1*IZH;fBzpWUYXlA3&Q?LB0iT*+3{l(W|6TmYE1@&43de$SoiZ*h)@t;aZZJ
zpNHf!=*mfGTMTYQ4yd;XN(Rszix!U111a;10--0~K|&R2*cH?ZPXqPBkyn(190#c>
zp!R{<evr#n!CPiE(m<^~Wd)D?a!~03Ixzz@<Oo@u13wuLy2}<8Z6KR+6DyNaq0tN~
z#6V|n6oWEVW^ze<ydJm*Q;a+SfYgRU)vKeR4lh^GTT!58_$U@37gn~2=s~Td5uQf1
z2~l)|RDf`0W@=6f=o}m*zkn`egvL=G(yfP}R)a!GVp1t+4>@c)0>+0-JAih;frj#6
zvM@eug=|4(T4s7qY6*-Fn>FKt2|(ir#zow)m{^ovkXTd<zxfb!<090JAPZp{Ae;O^
zEgjg6Y*0mz?THFW$$FqdCO0z=bQLz}q`Xu;xZc8)T=+$9DftQz|AGXeW`OowD!}eQ
z%}-N+n4$|dg^LU9JkT*YFsJG{LJOY&ki&~Kkfy9`)m=g9Be5hE)Q$x$sV~ScD$!Hd
zQAjPw%*ju;RZ>!Ntw;pjeqg1*6#_X89h59VLtNm}ArU$WYNf!%Wv2j{Cs9Z&1{K4g
z`w|S}Qwl)SBQU+-!VrECh7o9N7{Svs#Ak+rZlQt^9y`GDAS3+pOHv{Gy^51T3sQ>}
z;Kx9Nk48c9ssiY`ZqR`#$f7z5rJ$7~2+JTxrWnS9^@IHbzWqF>QU^5q2(k?{vY3W!
z2B@S5onQ%y=0xc3Cj|}Ap>Cjy5p}>T>Orj~h|$n{1r&-PC;Wqsv@I$DX$4g~3Q%2;
z`b0NZAs>7TDR@~wG{iwS7bK;EoSjyhqsIlRun;TGK})bQQlSwAxzrUjPMW9SlAjE@
zAy7X#zqF`0H6Ch_KEzSR;KhHSy@+}Rl@J#vpejrN`7#j}?NHrdXXYs+XMpxPLbm~d
zuG1^YS5QaTm;ereqS8Doq)6AzRY0{s58@I-E-uh{H?SGHl>FpkJ&4cs@{7{-jrA<`
z!CQy)lXFu*rKuihPY38SR~=9w<tBnQ$U{2Lw(wjLlvoa$Blk$nDR9d#f~e7i<{rJo
zl$3Z_ovop+Td1z1kO!J;vsJfOhs?8q=CDB}e_CQ`P6>GEOGg28eVnbjKWL5zbW3+B
zC|`kF1MoDik*b%jX9Z5Gn(DZ0(*>=Egq^RfuA>0jb&v_V=NUXv6<<<R3Yz(Z+7=Aj
zD+fwQFb(=Csl}N^sVSgCi%WA#@Y@1k@=2^MiQv<!AqgF{gdV(IwX`I)7`Hu;Rjx$&
z12WAHO81be5<Id5x_&ni)aw8pE0v#;irXgW`dLC&IhK~>=Ylp|fex1iUxT6mFVplu
z%XUE5%9Mkyfk;VBDosxZZ7Wv?%`$*HZ?Kq+F9M(U7++jc3hG2@K)4t;{1t*0nnBYC
zBqX35sAHkL%=EncqSSckNEY}wLde85bRls`eh%oWPly<_@diqepk6JwSl0uW`=CTy
z3>`8CEmeUwQ9#)gw1f<t=Rn%@3PD1k{wYWpQE9^ysx5dgl3pRaRg{^gpri*rqyeS|
zp$k-j!$fryz&-|TF$5hePzE{0p$ya<f+>NmQq)sII(QhfUky_ZT6mpT0zdN=I{Jdt
zih+f^5-c@C*D8ZUQ3<kn3FKASr~>q?LWnl-85f`pi%?CFQyCGOutW^P;}}CJb_(E)
z2>6c{RMG)m2n@cF9(J@KUbpBdD8Xwm@Q8?>f;0FSmP*ilInYuUvKuKU2Ygx}>^w;L
z?J=O(MvZAmT2z3>GlU0@Y|sfSiQx6<n3|wz7N!fzg>(`jc^7n2JgD;@4;}@vRY1!v
zn7ITN%g}U=yrSC{6loBbK~t42IBmgpm%z9FL*0S2ejc<q99AHLiu__d<T4d%7c99T
z6u?B#7U+Y+1(e7MV~{oRu*{0g0hJ#h?aB(E)0IFydQc^e+z$e60)@8kA#=mf<{qeV
z3T}9S*0+`=mOwgQC`vMma+38x*Ls6?k$_!`B3+SFo|%^pnPi1XfZLRyLD7QBiX2!g
zIXu58J1IXOa?K1x9n=X0pu-!BLH)i0$o5u<EZ7t*OMIa7HK5reB*&zHCIpbA;InYi
zv5@`%#8B|=8F0%3lq*5y2;}G&h%BhblWnM@06LEra(rzn_;8xUl2mBg(u1Wa*nzhY
zji9gxYc|pW@0kD<NC>UaGv=TLD?$fIS4v8>I?T-YOwfFydMs?SOg8kG4NM)NVjn{_
zbk75(YMA*5lOc!4AqNb|Eg%eX56E<cS`ZI>(j6}4CHXlB)drx+eNfhdO=E(zgBAqj
z=cvQZ-vMoODanVPV}$Bl)cAn~1TI5SRil`XDg|{qQhF%_X9(~?MBrWt=ulkHWCwUM
z1X^H%7L9?#^eS?SD<InhAS=!xZpJ*I7GfwyJg3Bi#@9iWX^I|*1-d>dHAe%gTCXG@
zyb~2#u|mv7R9Fx`IBH<VfRa8;HIkRW2@sU>Am@M>8W<R0I0ZB*Xse)JRGJsBu3cPG
zq>&8`8juBGoS6oicLf;(aW<qrLbw8?C?y3pg$W+XfGCUxn`)pQ3m+9gHyWF#P>n;4
zH6*`*lz}i}F#vK*fad8yWs8EX0<4&Ug%3y*_-x$bjMP-nA{>}ny+rWgN=SBq!Y?^L
z2R!8p@;@R2r=-BHSwIgBkZE9(^*|@sYZT>|+Zuwd!p$ko&9hC;&(Sv2gan6zEht!#
zYy&wAWMff&Ikqwb8pIe82-1ed(xUuwZ6jP3gR&sRW(Wx~0~{uxFw+Cg$idPgHhJVT
zTws9?I*Ov8M8Ow)c7k7ii5ut=7;xhRRE(!0hZ1PK%gERS$z)K43aQnxG$J8dK`w+e
z(v<>=^2;)dK__G8!0P+VeB{ftV1qLnCGe&JB;kP!0gHiFP{HNE4gpC)%tzIYG|mQ*
zR#JeqcG5tj9?+2+(C8X?ToSxRxfrym8+_shXt`)=UI|!KLsK)_FjfyVRRbzVz}BXN
zct~e*gRbZRNrE=pM_cI{$AU^5_&5Nh#6c=$z|FH_4X`QDgn{q~=$2vlKud9HPD!x>
zcoqOu7FVX0D1dJrgtZ7jvwSgmpdlCNP${I(1#e{)<fJARr-Fv9LF1p`UOn=qgwU|D
zv$IpuQGhrH>^~&ec;<n17AJx(hED{o5(lLW&?tFPB1kH;7;LH@D0snoi@-BW(0*kx
z^zgw_Q2SG(5b9ZFg$xB-g{sT~jcAR`X!UaSSRIATXm!}OGbk6nA2AlvI{+_PDum?1
zVhv4bb09;(mJ&n2P6oLKbek*mGT01AvV(dgBic&GAXY~KL>oeBqgWmIu4ve16wojM
z^_SNZUg-px3nbWr2DR&9?PCZVlzowI_Jk}*hqsm?J5%AUWAG7W=q+N1Af)XL>2pF=
zfQC5Y({d8iq375^wx~jtfP);A!jLhz5scET2c6@F{Y);fT3G8D>;`DM0v*C)3z}I?
z1&y-m<>i+{Ymda@<jl<YBuMhLRe%lb>OsbLp~re@D23)_mbin4`IJB-LU|>&N-6pI
zxzJt2NuX8t@LQA=qH{olMhf65QDoyG-B!@>Qn3d31Vhl4lN``Kgv<gBP0$v8gIMSd
z!;svU1S%^O%0O#3;^U!*Z^p+%l3+?nJm`%5ymZiA<@x0xQ_?_Q(@;`P(O1pYSB+Fq
z^{`U)wNed+x)Nfh5_l70YKoNt$XK{35JN!)IkaI^0@|hnjt^`dWvFAIePgH=s354<
z2P%<3NeNNZLpN!{)PO8U)dsc;>PG0GGHCc4q_9{=K?!tQuY#^FDEWXx7czudtO?##
z3XXP=6gXu;R-3}Mc*8a`q$z>c4y2|iKt`a7!NcNK3QC|;D8bD`Sl<n$AFrbT-U|tK
z9Jr!^^-H56oeJa~MxYo;ECMw^KoJ4mn*`PgzJCa^{us;$FVlt;o~ebUpc(W;(0Odd
zC6M(QMc}@8YGMjxQyb{UGDtu|mfQrx^?=qTgMC$mx_})b22J{)yabvjPfrCO3lCb{
z4q8?UIqLwjlo=!j-YSTs1*8o$N)KKo1WFjN@&>~kkSc@;U=M;cgD^-6G?NKEkPKuD
zxH$<DhigI1HKAX01v3KT2EF3^BJe_gTab2SWA(tjDo7dz=>cJA84Xej(GGSPXsrwQ
z4gjoy2r)q!77MnB(-M$E8dVYGxH)j(DcCAN&z6HUeLzjWJV;No9DGp<qO}Q4`mkOb
ztfvWbx3WT^f~`U!w0HuE6vjg*@sMwg0!e_j#lfXOvp3M&l0YZOgOdvAI605Zl$2DY
z^DEIBWU!nH9@)#&HUyo+1alT7(Sd9N;pF_hlFU5l3^Pce7<7yt=-6B$Zz+J~M6f<c
zD1wYB1Q#_RKci0ngXEMIkYpk1kcy*19oRY2&~9g;9%!ozIHIAt^&pyTq2|D)Ks)qG
za#BGfB6*-3s|1^jRZz{e0_|j0)yUH})PZV4I2KYsq3Fik%UGxfYp;NgV?)@f2Wz*W
z3WIh~Ro5ygE4U|uFJgw27NF}E!3zOETX-R>ULbw~4d;MTwO$BhAO|8^7!O*|1X@N9
zz9|EAi4EvdBGB*$sBZut)BqQrAQ4da2Rdp2>(7IP;R-SCK!Ox_Altxu1|eJQP<Qcy
zA`$si0|jLTkHq9`#F`+;ZUwM@P>zB&iXiO^&=nO%pwbxBvIQq!Td+ciMOg2w0Pl5B
zuvGx7BgR0au!CNb0Xduo+l3fapxw~WUJc0C%HW{O!q$_6h9gqx3F%CO@){^mk*h>-
zMuX)r42djg4g~2!WIu>$AV*+iLP&QFBn43fvJyv9$b>8w0oTh4%19@1gG9hH%HX*s
z*eVOqIwa7xXvnHWa6W>LE+TA$u%K%<L2kyKe6aZje3$~L>xS6;0?#ez9s^kg-Z~9(
zEkYGY7>q$HAVH(H$Sp-|c7haR+rAA-yXZ&w!|Lfw@P@5a_;oGtUGv~10h&B3RLCp=
zFaHA#%odmCYA8e(8lf8pst^i|z!?vF6v3K*AU8sy6T|{xu+K40HNfT%0^86L#)1c5
zK^X!(HVhlXh2J%*pse8H@24K35ajCXALgnM;^7&r;N$7%iY4qpMGv^=uAvUsuda!9
z|2z0hKj)m%yzEMtWyO_wC5aWFOaPs?P5~!F_>u$#P^AXl=>`e~gyTUx&^S02$AF|j
zm|$q*N*y50AY5pq2P#65YGPFTL25v_&<LZfL_dE9TVe)j1z~0Ia55s}K<osiBhS3d
z67W5gpalY{@HIT}DMpYFi!&07K<#AMB_mj~D99S<cn<t#Tu@&fI*J35#`GFAS3~=P
zgaQky8q%F3q!?r&q$ULqzJRzOjOhlDC<sFp+bU@pLyv(2i4szurW6jEgarjCI2b|U
z1TT53L3i<#<fPVu+u?9E;OQ^$nUV^iH8`N7J0TZ3KzmWd*o$<dG9f1_g@6}Ufc6A~
zTnAe40p2YNE{Wl8EeEYn%*o771+5xQ%*+7|sel{;!USzV4>T-UhLo}sbc6%s8fef7
zNLUO5ISS7?NT}|GL@a^!6t>(0(gHdV0)8HZ0=RUB_PVf~5rxeVi0w)WO0c6K@_e8t
zDS?cKw+}%J>$DX#$Upf3d^9PjW(H5PfGPlJ2LSiU4~SrbIud^V11Nk!7@-1p+Yhy_
zM2jr6f(KLqDkv)igYUOZRe&5>3NLiPT~hGTnxIk#v~mu#+6#IlCvx)?<TOxA0Hqyh
zs{lC%3hlTcXd^od)Y1m&fwa~Ut!?x;BQ~94uPwngz^m?fP$wN$S%RdH9gO4zkbl4<
z3fKxYNZg?72JMPeP}ft}f}hc)YoZGp>;aw6r2twIkM#&TY+X5!(TFVtqbK0O25HbH
z*ujf%KqHpW+G@m|fCniIh|eF$r`Vwt3!nr~RQZiN@q=z=ffcDlolu7*qk{$}AUOhC
z#)A|mWF9LA2`rF5K`R@<M-4)b>cM6cNEt~-%ppvN1W+;PpgE8!AgruVUW_;sQbz%4
zL;yMw5)WOU1X7JOj8K$XtOx4#7YCK*!6OVwH(V*?BtVdI@JuVna=3bB#G!kj6WdEP
z%8L~o;9|&y9+JtRRajtkFflypax05K(}WrzJsRZ-T5$hqD(J)QQP9)`4}Zg*Y^PuV
zE@2f6AOmM0ufr@;uvGw=12PaUq@`e}XMkcL+=t+?ljyuy&|oY0c*}CsLoGpEm^tcJ
zaOdeLsDnz{Vs$Hp!g$b92GHm?hy(8SB2K3)L>|I{nFF^RW!wkmJA{{t4MAI8zy}?|
zN1F<(^Rx|XA=7{$Zx@4hO{Xb=?*4-rU#nkTj-&#bs0=}sjFO&`mV!o}52SK2)P&GR
znhM~vtBQ@Fvk4%BA=*%y+lcXO*dmPRG{iU<)MYvfX-d@yZ$lgdIw}WbD1o#Ljtod@
zfrSQiEuKOysE-U2)P%Yd=3^8^FhNiw478mKav>;W@vcH<9%RcEtT_cey%e+#1Ih$P
z4pbUu6li)j33Q50Dr7Md&dDM8^a$o83u0~sYy<diBv2v+wJKnZQ0RmJBm{CZ^FYg7
zLE8aK^!3dkCmuo%s00ft=z<pFgVbn2WeakOazNv&iMdHBi3*uI3dPWaRl&P^!4o*e
z;ABt?o(0JSjolhTauR6v1MDzJyr(HS=YtmEmw+dJv=tB@109p80h(k1S*8JXq>h3T
zgrx+wNk>6RTLCN&b+3+s5`+bkSJDJc@Pd*9mIS2~0y<_8boi1Icr6NAOlWE=>7z&>
zk_2=T7dl@J&d=cC4s|O94Om1XN>b3VvMH%0iJ3W|JPb~FkX#F$27nr)1KRhIlM0^1
z0qqWf?Dl{y5P%I!!1xeZ*xnDAEQ}A^ZUJ2|0OMvA=jXxLh;1K9RYovD$QEH3FCTi5
zOHx&NQD#Z121q+-W=}_<1ay-#_%a&U9y4&+s*s<QR$814>8FCnM<DC+z+*75)8dn=
zj6lcs7Nq8>YifWE(os+<F9FTh6)PZZTTX+VPYt&fy8kO3JP;iZS-e-Qk(pZn9omGC
zGJxU(62PEiOEPl{z=;hqM4Aig;DBcJVFo~|RoL)<8n^_k&V`Tm*FwA2X`tO{dhy^@
zx6sQwloe7y_lv<6-oo~QL-*b)E0p9H!1f$~GIg;Ycu_1&Sv=GM@$ni6+aR;BphI*)
zC!A`+?Ste}bR$9Qr-(ELWHoGd7PQ+5(QJqN6m)uqdM<cHjXHQ77(7J{2_A3;!s{K7
zA3&>1V3vS%Kn#QW6M57Lu@DQS3^klc@{nFU$YaQd!J^v<_5`GwLk~Xmh$GTwWM2|8
z63J_jD1@Y1&>9kuG0=1g6$BsA2NkNyEC4Cci^?o;%Y@!7167s@xsF>IynZDfH1HA+
zy`jZcAv0gksj?)s*wbGlw=%vevjAL}K&%Gwl1kIkz%z_U3L&!K9ExVE20{zyU;)s5
zZN(s`BhrL2hzrf85OY8g3hIOx<>!~^fcD1}gN~laNd@nx$}iR{Pt3{IKn@m!VKA*A
zkAre4h{kdYL1r4*=X$AmDa9aHYbfazR3hIk2r>tT(?ID1DfeaNXXa@@+y*ip;bxcy
z5M73(p(r&6WP}DRv7j4(SnUqW*vOv4Zhlo}0W{3O&eKsSgEp(-C0MjNG%COsb0eYw
zwD1eGrww++9g+qlYr&e3_~0~!q!cBOfEA;NK$U~<6@lvl75VVu43eKf5rGs7u+uQn
zVuN5}fQ2k9{L;|R&;XeLiY#iS33yqbnS(SK3W|6bhN-|FeWWE1q<Fw?94HMdfL5#H
zNn`5Z%Myzf(vYf0m}6lyJgTD0Ks}7KH1Jwq=mY_3iHpq#X(%y_a4N!iX*vqH+=16_
zL@Ou1v?w_hwzdvhkCzsJmOaFm<i{r`<`$F|=cSfF*ShOK4>pKT$t(hIB!brSph_2d
zTt1eTlLGE-8K^69pv$_5UoZx{xFDpc6q;@z?N@le1<JKm(t<8&fC_-OQ-Y_UO2I87
zXp;gW3v16IFCT@3zOn-NSPD?^fTsZ<`>7$r|KM4u;$qOSJSb~`Ht54MC1@}Y+LX=A
zOUW$DOesywf!4y{%UCe(l~r=8RDi?-$UxAPp&qzh1}<nJ=K_?a7J)YNg5<y!A}_}T
zWl(Ut3a728g*?RLN}$D?*qRMcX~&{eh2)~t#FEq${oMQ%(A{~U+m&D*%BfUHsst??
z1r-a4=|!ojpe?!3&HN#upu5@<a}|n8b5e_;;SI_tP!}RM072*BLxnMe3lgZ1kOwUg
zs)n>Hq1W3$wIQ1ZUMYw0F4QlcdC57YDWJdw2Oh}Dknq;iL!K@H1!T640yr!&*5jbq
zg0KPPtGvwI0+j5lV5<PlUXVsZu|_T=O@UN{FvR8YMX8`IRb{E*E8;*02w<uCpj`-1
z)dO-oG;<;vkeVnqqgn%UShPAW%fM9+E=h1L1ac|}L(Bm!RR-$;r5X?i+7Jh+0AXk}
zMQ4M`Kads_Gr)&cg7QXmHh5+{8x#v5RiG6n*`Te?AOWZuv1suKG9D6_IC}^nB_OP<
z05t>D4bjqqazMvHgDiujA85H1tqzq1`wGg2BspaTSZ7PY7R&@6+f=Lp)d{+Z6SOhY
z)&$yg04>ulEh#810hN}ZNo&yT3L=%{BjjvB`}5LMOOne|V5i?fXL>*jbmBp4!V%k2
zkdK9ggg9u~XM9FtUP@*j^iVEHhZoV^1?vW_lL4Ld3F=`(udXkHuL)I(FH_QnM7$ns
z(Pd^DbPqQ|1hFIpHf{h38%Wt?tDvM>9Iu)gpPv@5nyFe0DYBGQHPC$n8^}bsPzUN1
zNMeN<2WyCcn|O!-110Zl*djGVZ3xx{Y8EF!+QmpUGo%v@jxa=g!4B4fq$2b>1GG^;
zwFFe&rDT?XlP7rlBRsaD3*sS#2BbE~%uCNn1((oPpiKgxqz5${RIq}b2k{?NGz4@>
zXfC9N1s9W`GeSYvkU|PqNT<CRR2+eK0z*<`JY=JNNhW9u8YlsP)<A()q#DLU6oBsc
zsRUI~(8?LKpj$&JwIVfHNfTOBAe4gc6-&%1)_`tvC;$y0K?4h9M6pH^sPm!+TKtib
zS^<klsA@=hfmZJtAh&43RM-}Pt`#aN%1nmbKo0RYXaN?;sd~k!pnK%OuGA=iZu^2a
zN<b>OxVVtkFoPCM8|Yc;87OhVR?CN{re+&zz)xg``W1BEHfV|*eCi%zG7?mZm#3y?
zD;Vl2z|JxOt#Jj7kjE#cV4T4U@~%R1v7v#nUPgXyDrk3&A#6M+F$FRT2#Q_s5}w53
z;?$xN=)r{`!;Ex{paFzijV|b(Xa!SE&^Ap-g#fV+ddz+hc(1py284!s4m`LF3JT|7
z1w#Wv1&E}AF=RLxHv12%6Qec2!K0&KWS|)fSsDVW?NSvI6+jJ?9K_-faKbh;fF^V3
zY7pdl4YVQ<91^jP8H@}J76t|;mZpi81}MNh#nQwg*}&A)+|1O>)GW!^(9#4XW@Ldb
zYLRSUVQyk(YHn<9Vwz@VXl7z&VrgQMWME)n4yH}aEDVgzEDS(mhG4P8M8h;NfXP^x
zL2NTOF|#x`F-tZxwKM^Vfpnp%F#)MDOM$5{F*i;#H82D1gM+#sq~0RgAPwD3rfFst
zW|kn=nVFiUf?bC0!W5{T7En3`WR{tQ0ZcpzByM63wFpjGBpV>P3gja5<U~`rw1K&)
zxv^1d6gwn#jWyZ){QUgf+%&mCq??;th+Bx8CZnIG`Ym?oiTY8j;NxnF%orFLikLtI
zGl)n85osVo9YiQFFfc@Mz>cH1#avujT$BnD;{+{SNdz^8qF6vjy+rY$9^eus5`^XG
ziXs`1ULNFwD~h5)_UMBM3y>rqXd??^nPrhBNJbST!vd;Fi^@U#IUu4ML`(q@3qZsw
z5U~=ZkRMXS7{)_ukfIqNDQys;2qKz5L?eip2O_vo2|fk}hFcu4TVfd*7#NCU7#J8>
zm^m1E7<m{u7&(|YKqL$^afq<IVG|W$6o5b$t}qTZAyx)P21bT|T=fhpP+bW1EF5NB
PMjV_hj4X^SOdweRLb@<s

diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-39.pyc
similarity index 90%
rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc
rename to examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-39.pyc
index 324b033b154a5d1db71862ebf7103810d4499aa2..17b373431080e522689c3074908153560ca3d7db 100644
GIT binary patch
delta 2076
zcmdmggn9E3X1+vTUM>a(1_ski%|r+GjeH(#Ql$(<UN!7lOeqYdj1X2WV+vypV;RHb
z0=6iDQih_g8ul!nTIL#-6oyiU$+y^ym`WKY^RZtKi__1@&rQ`YDa}aLcgasK%}vcK
zDb_E|%PdJRN=!+OFDS|^ODsv%PpwGIEyzhNhB4w(@{_Yui}Z_2N>jjEl8X%ujP;9B
z3-XIfj3+a4NV0rpU|`s+#9_+BCc(hKP$W7zma9uk5X2M)5fDKJ28Jl%%=EncqSW}@
z%;MtAy!80A%$(FB$;pD;a*Veo8*o=M8ctrr{f?zbfPrE1A|541>B&cVlo;hEzu?hf
zw3^%{WIs8CS52CQfq{Xafq|h|jDdlngrS)si?N0=lc9!b3FG96yy}d)lXvneGulqR
z%NxvSJK2y=R@wpN3_cKH&A`CW3DV2Nz`(%4SY$mpldp%VlyUNRz9Obl#?9IMeN5~i
z`x&Z~C!ZFQ+$=9-z{qGbIY8K&(Qa~&u%Q6hEKs}@ferMVd{#JI#R+7#Gl*~j5q==T
z6-0m)f(eJo9wJ)oAq)%*Zbj0QD@7C;%_h$l(c?A(DKiEUCX=s-C^1S+{w|WCC<PLb
zW?*2r#aLYA&A`B*$#{#oxTHvvsmK&06*KvulCTJeO-^ENQc9wo8Y2S(#MO!*S5H15
z+NiD$(#;(oUzS=_oSB~&AAgIZxTGkpBr`X)C=z6b0*LUMoFV4R4R$}&5eLK^7~Ll`
zic6YVf#QrMx41O7C>G=d?IInJ7DUX7g190e0u&WR;vkm9<N$FQZXpm;5=10UE*Do|
zivcn9C(jj^5Ca9lEw-He#FW&cTa0-{av&+A$*09-#j`*bG8M+(VlGN8zQtHo1a;DX
zaUD@TkTP&cf(dZot4(&0sEt-*U|>iF#V07UfG{&CZW$ODKoL@G2Tmj<j46!GOf`%t
zOwCOFQngGqObeK57#1=zGK4dvFt9MNFf=nWGUN#uG87vaFff8)Bm?7Q7Rgk`+R0gx
zvW%}LcSu$+mP}@p%8*9W@c~&!fs~qTT?}(AS1orfPYsU?Lu^hhZw>DP{uK6wj1w4(
zbS7_>(zcYT6{rzdAi9vDhOvg<2GtQlh71!Ji+hj^WthNNs561FP%h7HGNZJ(9%0>g
zkaSNrkd|dMn(Qa-<=_YkL~vv)f>=tRB*LDOS(KNUn|g~YK0Y%Sl&j<8i{wEv9P#l5
zi6t5F@wZsPjH3F<H>9=Xl0kCpMX5O;$)YL{k0U*`q%1L~G_|OFvVx4fXbs5p1`yE*
zBI-dz$K(*1PCGp&9!3rjWMW|CV&q`tVU}PNV>DypW8`7fVJw=49+#lR#sQ1VGn=Jl
zg&9F5nt_}aDA&fwS%LziL}Y<z4RejaLMF8MgaqXr<iJ=fr!D6L%7r1CEJb-Bhau8#
zK8Pzd`J3Dit^{yWj4vzlp8P|;Qvj64ia<H42$ax@+$T>|kO*%Eso^O|OwNV|Tyaq*
z$OMolir@iU)B_UJ0I6k9Pb~qLdGL6+#g?90l3bor<ONa$3fZFaNs1ztpp15lE5Ed)
zptK}DC9|lg2qX{A1NtD=1Q0PB6!eVQQ7n0xxdlbw;GQ|zS5Zy`lx?Ee%Zf{iGV{`l
zszGXNKn2F+Ns1|4I!vJO=VB~cG)bw&b{@zKR#0v%zQvkUS&~|Oi#@F<KQ|+_LX)*<
z7D$#4Qq~*B7w4B2C8riG0eOX~Ag4%Y@<AmF_B9L)42iY7Hz{A=HTl!M6}*laj0_AG
z1_maUriqr5m)zfBW}IejXlY?#00U+g2F4c225IIdW~S!G<|d|TW`<@aW+2+a%o4;h
zGc`+{{NcW+PKt$@rHPq^fd!OK0qHffFo20Cfy7PBjnfQ_jV32PkYzP7v@kQ6-2A}G
z*n)w90TNrF@}MXQ#LWN^pr`;xPZURDQF=jQQE}=m=HklYqV&lh9$44Qg1pC*n358o
zSd<Q~Cc&j&ks(MgxJcv+Nv$XeNi8l3D$UDFEh++4Dn+0a62+2LoS#=z2~r6P*rHkx
z3!EGlfmooRDq0OPpC1;c1&NiQzz2skxOf#S0_Do07La;S0xMboVsU|^@AAVO0I&`5
AJpcdz

delta 2030
zcmdmdgn9oFX1-8fUM>a(28N3X>WPavH}ZL~No8{sdDXCIF{LnMb3$0Pj46yYjM;3H
z3)rFrvN?*nYS^=QYME<TQW&y1Cf{N+V#?;2%*TFRMnyldpg=#l*wDaOzbLgJzo^7G
zJ~=<PASbmXb+R>wBzG|j0|Nsa0~14)_T&i*#5WglNHejCGcYg|iA<i%)g>SRVhVu>
zsGu&lB%{P+cWybxsL5&E)r|U+A926qfEhFS3Xc+_)a36xN{q6TrFeB1Ehg_0vY*_>
zt0vCOz`(%Iz`#%pGOdK6nIVg@hOvfe@^M~uMxDv;d6gNhCkyihGg?nh=aZGT16j!j
zA}kpg7&<}vm>3usI2em8C(q~WVan#5tjk}-l+C$$A%7neZ!sIx8v+bf%9DQ!NpAKR
zGGJu1n%p34&1f@ukFcQt*hEm!7J-fRnfzBcT*U!ogCmG=0ueqS!Wl$>6@m%7$rU16
z>_H3+3~ohIlUIr;GMY?2EuzP508(ZMB8(=ph$=BkPF5GqP?Q7-NHH)l++r*)@?>CO
z&}6*DTwGG5$y8(vl8TzVK~#bLGbo0u6hRK1{6Vx)U5$Z(A&NUbzAUw<I5R&lKK>R*
zaY<2HNoH<pQ8-AOJc#g^JVVTv8|-ALy&uFJ7+ohDic6YVfJ|V?EiTP1iU!%LU8D`t
zf`~p55LXyPh=K?)5FtLfL0pDg5X6)K5ebu*iz~22ftY%e&x%WkfqZ?7Ehj%QCAH`l
zV_uOgNXlUHZ*f`i43LFPh4E3$MXANN7^{k)PSTgq5!D4L0|y?M0Ef5g<N}FWcU1-k
zhGbAgf&u`9L8*e1fq?-O=*3zL3=Aa<C5$PI%}h0nDNM~w{Q|X2HB1YbY8VzWGBSiS
zm@))11TjQR_K-|vteU(|QkHSm<TH{Lj9HUir83xIvL~d}WHVxzYq@H<Yk6vTTo_`*
zYI$pT7x1UBFJzp+SR^r-TUuLLs8*mxV1ejDh8o5iej9}03=<d&B_=Qya^)FJ&X*Q9
z!>O)@F_=M<*Y6fvZhlH>PO4i-5y&S+j3D0|Oui)TC14L?f+I`;#8RBBBO@s;2TC0r
z@$m(TB^mMYw^+f9qMFGuGFm}NAUXD;)EtmxQ8|dmk)B#omY7qTT2un!vJ|Bzrf3S@
zV$RGhh~g|tEzU13N(RM1Zc!!3j#>~=2O?@fMC;@mGMx%iOgxMnAjrhP$i>LP$ipnb
zD8{J3#K*|PD8^Vcd2*wy2p7ol;Aq&kdA_VLBd7@3BBuq)X%FNqC4_1PN<<cj)-cxy
zEM!8C3U_&JIWJJ|2+?FI$^qGlNMN}juH@ti@;|uZ!RaNwtjKfnB!$j;P(mmICHo>!
zYAA9A$qHrWrDT?6rj#b;#AoJ$3ajFx29N|#L1J<?Tqqr6=wuK92~-dZR6rD|gVeI8
zr<SB;=A;(E!~PaqdTL2>c}kH7NF^wQi%NV!LCIE-nVg-IdW$8cG`B!g5S(Lfapjkm
z6qJ_4r(_lt6@WB=6SE$O)e9nKfOIluN3rB(<`xuzLwnlf=ZbP7p!6TbURGREl$n=a
zQ~^?3IayMvoJ))e6!ct-Me`@mQfgtGJy~8^R(?9j7(PgmY#0xT#G-{D4=@$v6lqV6
zR<>YY$-uynIAhP|S<2UU)hj3{C^%$frlfL#SShJ#3MHw<CGm+VDe)PZDJiLW8pWwO
zX_{7GX^;|Sg^-NQVuj3Ng+v923a}16g^-L?h)KFBsmb|8i6!|(3gwwOISPq6Ir-%Z
zrFoep=|zbtsR||e3Pq{8`DLjJB^myy5Zy?|2d9E0QWYQprjVJZP?C|VP+U@)lA2ed
zkdj%Hnp~1!RH=}bU!;(mUzDnlnU|Jdl$%(RnV+XO`S)Gl$%Xe;a7BHyFfcH&G&MJv
ztag8gx&a!nNH#DrH!-s`OEELGG%+zZPBSwwGqadn{=mxE3>3!T<O3>2iV{KGG!Oww
z2t}a85yg>MlwOcnRGfN?xwx{pD0T9a2iA!)pkU?!g<N7$dTDNIUJ1C&EYb(*1s9}z
zA*mH5A*sbBL8W<lsYOMgI;aSgWTRNJiu3b|%0Mb-gNP~+0ZxqbK`c;|6|I=u@K96=
Y6vN;oRip@#X#^3V)Lb-o^7@A@0Q&&@*Z=?k

diff --git a/examples/example_docker/students/cs103/deploy.py b/examples/example_docker/students/cs103/deploy.py
new file mode 100644
index 0000000..2429949
--- /dev/null
+++ b/examples/example_docker/students/cs103/deploy.py
@@ -0,0 +1,56 @@
+import inspect
+from cs103.report3_complete import Report3
+from unitgrade_private2.hidden_create_files import setup_grade_file_report
+from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet
+from unitgrade_private2.deployment import remove_hidden_methods
+from unitgrade_private2.docker_helpers import docker_run_token_file
+import shutil
+import os
+import glob
+import pickle
+from snipper.snip_dir import snip_dir
+
+def deploy_student_files():
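+    # Generate the instructor grade script (report3_complete_grade.py) from the full report; minification and obfuscation are turned off.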
+    setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False)
+    Report3.reset()
+
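+    # Strip the @hide-decorated tests so the student-facing report3.py contains no hidden tests.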
+    fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py")
+    setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False)
+    sdir = "../../students/cs103"
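+    # Copy the source files to the student directory, excluding caches, .token files and the instructor-only scripts.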
+    snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py'])
+    return sdir
+
+def run_student_code_on_docker(Dockerfile, student_token_file):
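+    # Run the instructor grade script on the student's .token file inside Docker, then unpickle and return the results.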
+    token = docker_run_token_file(Dockerfile_location=Dockerfile,
+                          host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp",
+                          student_token_file=student_token_file,
+                          instructor_grade_script="report3_complete_grade.py")
+    with open(token, 'rb') as f:
+        results = pickle.load(f)
+    return results
+
+if __name__ == "__main__":
+    # Step 1: Deploy the student files and return the directory they were written to
+    student_directory = deploy_student_files()
+
+    # Step 2: Simulate that the student runs their report script and generates a .token file.
+    os.system("cd ../../students && python -m cs103.report3_grade")
+    student_token_file = glob.glob(student_directory + "/*.token")[0]
+
+
+    # Step 3: Build the Docker image (obviously you will only do this once; add your packages to requirements.txt).
+    Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile"
+    os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .")
+
+    # Step 4: Evaluate the student's .token file and get the verified results; compare these with the contents of student_token_file:
+    checked_token = run_student_code_on_docker(Dockerfile, student_token_file)
+
+    # Let's quickly compare the student's score to what we got (the dictionary contains all relevant information, including code).
+    with open(student_token_file, 'rb') as f:
+        results = pickle.load(f)
+    print("Student's score was:", results['total'])
+    print("My independent evaluation of the students score was", checked_token['total'])
diff --git a/examples/example_docker/students/cs103/homework1.py b/examples/example_docker/students/cs103/homework1.py
index 3543f1b..286b79f 100644
--- a/examples/example_docker/students/cs103/homework1.py
+++ b/examples/example_docker/students/cs103/homework1.py
@@ -1,19 +1,14 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
-def reverse_list(mylist): 
+def reverse_list(mylist): #!f
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
     reverse_list([1,2,3]) should return [3,2,1] (as a list).
     """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return list(reversed(mylist))
 
-def add(a,b): 
+def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return a+b
 
 if __name__ == "__main__":
     # Problem 1: Write a function which add two numbers
diff --git a/examples/example_docker/students/cs103/report3.py b/examples/example_docker/students/cs103/report3.py
index 7d4b431..c9a23ec 100644
--- a/examples/example_docker/students/cs103/report3.py
+++ b/examples/example_docker/students/cs103/report3.py
@@ -1,6 +1,3 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
 from unitgrade2.unitgrade2 import UTestCase, Report, hide
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
 
@@ -19,4 +16,4 @@ class Report3(Report):
     pack_imports = [cs103]
 
 if __name__ == "__main__":
-    evaluate_report_student(Report3())
+    evaluate_report_student(Report3())
\ No newline at end of file
diff --git a/examples/example_docker/students/cs103/report3_complete.py b/examples/example_docker/students/cs103/report3_complete.py
new file mode 100644
index 0000000..37c50b9
--- /dev/null
+++ b/examples/example_docker/students/cs103/report3_complete.py
@@ -0,0 +1,25 @@
+from unitgrade2.unitgrade2 import UTestCase, Report, hide
+from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+
+class Week1(UTestCase):
+    """ The first question for week 1. """
+    def test_add(self):
+        from cs103.homework1 import add
+        self.assertEqualC(add(2,2))
+        self.assertEqualC(add(-100, 5))
+
+    @hide
+    def test_add_hidden(self):
+        # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.
+        # See the output in the student directory for more information.
+        from cs103.homework1 import add
+        self.assertEqualC(add(2,2))
+
+import cs103
+class Report3(Report):
+    title = "CS 101 Report 3"
+    questions = [(Week1, 20)]  # Include a single question for 20 credits.
+    pack_imports = [cs103]
+
+if __name__ == "__main__":
+    evaluate_report_student(Report3())
diff --git a/examples/example_docker/students/cs103/Report3_handin_0_of_20.token b/examples/example_docker/students/cs103/report3_complete_grade.py
similarity index 78%
rename from examples/example_docker/students/cs103/Report3_handin_0_of_20.token
rename to examples/example_docker/students/cs103/report3_complete_grade.py
index 89d9eaa50e45e079355529ae91c29faf094d2851..9dfbd03283e1bae5cb87d6d189cd8abfb87e97da 100644
GIT binary patch
delta 3476
zcmeBJ!t%bKMVKoyw;;c$L?N#<x1drXu~;FmVDcF&xyc*Z<q~0X1(j);={cz-Fn(!X
zW=TnEaS0bpATzJHAT=2-pI;1<NGwV(NGvK&h4G6k;Zh};xv7&aIcz4Eakw&ZaZNTn
zp*#5vhYlmx<h~PfAb|}hq(IcU6KWu8!wIR$`#D(|xh6wc>>xqAlOD2MTy_ctl_eSZ
zc?w0TAg>t4rxzurr0Nw^a&b*wesb>Qi(KnKihrC^0#S0OjV9mWHkmBQlQ&tAM||>h
z9)B?Xk0%gJr}GAb=}WwE%v@Ytlg;_M7`Z0<os(nanw)n|pOI_w{Bx3%_4s8b&*vAN
zoWQTo$E5%TsX4`|R+H!RtAgzLcTZyS1O5y~uE~l8l9S(^?_}hf{9ZtOGP|HGBiH1O
z7sMyq3Cc5aO->MeFxgQE90c7$s*^VgDKm0SPP_!NuJMw@WPV|d$ri$Llf#AeCbtQz
zPhKZ1H~Flv<z#jdqsf8dVv_?zWWaQ@hy<7p5aFD>TSO4dek>vhriDeVCI^ZOPyTU5
zcXFSoHXGQ%%99U@+Ok8KT$2}ERbk|s%zsUM^4+WQf)G)#ufTo+$;iUA%@$LbyhluF
z@_jLd$${dslV!!_M36Lqbb&QZ4iwi0TRTCV6{dHtxWeRp;%bwhiW|U$3~tCwwvdpS
z94cW9Ry{|;1SWMuLSgbx327KtMp9iF$)aKfTLmLy6C{y>#NuKuP%McvS#eEPys18U
zjil`4Q<BoGU`3NZx=4s=YJ%AyR~8gy=9OqDDJW@jO_q^znVcwPGI^Sm+~loNMUyq8
zWhT2yTd8t!ft44fmXsEO5?FC*PDwEr7gtJZnnGzoVsf@dp{CX3)zUtbe@MHCgVkmz
z*ec`|=p_~>78NB{YGi0~P5ysdiIHn^m5j;c^)i}_T$AIrX|h2KnXbpeC^`B3oyn6O
zg+wQxlNATkIYPYbZizX?shU=k1>__rYstAx&X)@UrNDcNj9il^-ZPvmA+Ib3wht5z
zkd&`il39|I$~F1lJx4~a$&U9mCm)axX5^ZD;J%x3o)X04C5cHnsqra^C5Z~Q3emA(
z6Y_i%Y!xgFCO0Zb34jHPlk<yG6>Jr%Yq=(`Q?Qu)T)}j*tfKMcKt+(j4iB9s2P%qB
zey6B7SwKl{vc3{1?Jz!4nOvm=^5=XdBW<uTpg>kuC{(ajDAa)14iYJh&&W(kNzGHR
zRRH^O@_!{$Zn*l)w8@Rnq$Wox3y8wSK)RCi^GY)FN>e8{DjP6zO^$yoHu;pY%;e|F
zb~12v$_j;$AOP724U6}W%_mo@s7_v}A`XcjkYR;-`AH>-nR%%x3bqOclP{|1O;Qz`
zEUub2xlPq-@<CPq$%1OJke~o5uY#ns$qj0{lh>*FO=i>(nY{NIJHpY*T$3+8%bm=q
zAv}4Rx(1l;doBz~Iv@j-Co^iufQ1j#vP^!vP;j!Kmc-<EjaicgwZtdyec=F>OVzXh
z({V4kChyh^oh+y&4VM3)#WLALO9{+A@S20O&`2*cFSA5L6DBfwvz8RHWgtU8XoY~K
zW3<J<G{}%XZGJHOz-x}lecJ4k=V!A+`NGT!N-*Ojp<+UlLv;LMY^GG0wD9C#Iu=l2
zNn~3=zDIaZeDZo-KCrT@x<X+3zpf0raw|OzbRmOxlfZ#s@Q!`5o<0Yd?WHe=tgJ*o
zYVvt~KV%_I1C_~)8WNLzRtQcmH_(BUN8peL5eks<2;@M6cWjgI7>i7PW&ldFdp}q*
za!vm60aVyZd{kxRnr#13Y4S!xC6uV0_;EHP*JQm<;*+Nsbx&3>Hk}NyPIU4FV;-;r
zHy8_nX^81ID7s%Rg2a5Ni3wQEToVCsa2z!OIazqJ!8<mV;$ls%$(*LTj9il~O%*0b
znd*URkX}=b$@fjACSNe+n|$9iijixwjhXr660>+luF3Ph$xdFVA~reZ2M0*$y>H5s
zEzOlC$C#T-z_SBV`34Gw$^2I0lY75|%AXJBT9c(MR3|%HSWT|6ke@uqLLOAkfJ+wn
z$@_nBg3LVgLlKe%b2IZwOHx6_OiGEqzL~*fYfHJwQI^h(T$2}AW;1e4=CcZ!oNVO)
zDV$M@N*x76fx`u=PN8iJPgsipt?7_eoSz4iLNp_is*GTQ1)0g&Iq>#GelgeNC~L>b
zbF2*~pR<-?<eL22TG9y|0!69C`K3k4V0Ty7f`xJwY!xzd3&3n;g|z%4g<OTqJOxlO
z9uH{-6<a|hr!VASlokfF(-dqKa`ocl(=u~X<Krh+*?<xY&p(IBw`_zMxhC)bV>0=c
z4fkYSTXsgS$((=XCx_Y!gA&qT!^v}OWg$rcoc_2b+y51te8<*o@*G=9Nb%0aH5qCW
ztd1+Rm7l!MfyWDMHN;H{Rhb1K%fUj*AP<1rYsDo+sfkb_r~<vH%mTN}oK&zVRBvYf
zWDa}7$yWAqlOyeo#KBUa#Gni^Rj<4#vm{kRlWTJQe=|m|>Gv2J<rul9|6^p-09Qj+
z4hmqpz`+|tPhG<Z^4W6-!^x74+LK)!^(UWq7M-5W$jHmY#WlT#kx`M6Yx;afMq5m?
zrpGfea<GEDp$Rey#N`ArCKov9O<%*r$OGa`KgYzV!mVDUt_f<0rlna;b`+AFKAV|Q
z0y!9`+c7h0Pi}VxS=P_Yr~+~XsHHajJTs#OL?Nhdu`rmNAZR?@orTdy0qkwG>;UE3
zDrx0uPQK_OH~E8$!el{L6OdQ*SQ)h#xu!?4GP*EwO<%yusLjYVSx`rK`U6%*enzh8
zA6Xfr8M&tWu`yaeOL>&EHhm5oqsrtvZh9c4Y>dW}9o&r=xu!?5Gp0?x?d~yI#Y1hf
zw};&1EDun1+L$CVeH90z5F^+0eH@J1lNmh~L2eM_WYm*|lyRA93i+ia1*IkNDVaqI
znZ*iz`FW{UT+`z@85J10CQtXYW#pQC-BV+dmj@%)WN$D3$+Nw5CZF(9oczj5K>%U~
z)VrW!Ue?<e(sF=`*(vELl;mfp=4noD@-}4!H*Tii=V#R7EJ&=($xlqtw3__MTW7Mo
zkKSZIAH&HFKC+O40iM-C&2BC(uFczh9vr9#B`=4J%#_raJP;GqQUY}r;uBL+pf$fn
zacWMQrWHgUq()gGBqOsJ<g-KtXyX8+MNc6lBUJ%xmTpRFa(+=_Nq&(+d1g+ILSjx%
zez^juBLZ&hE0p9b6s6|om!&EMm1LwUfOR9AADo)10Fr=&BPiE{c*P~9DXDoS3MrXI
zsmUezMU{|30o3@(%uCBJ%1tcE%+J%CF2l<hFnu~N<2o5GNK4Bw9vU<XwhHPN1_maU
zrsf9IllT~SOz+`mR8qAtH#JK%fB|z;b7P}ab*{X`+*DhoAaF0mScz-;HGW3t=~4oW
O>Wo~I-#P0tY61Wb=nLBb

delta 5998
zcmaFg&(g7krM`h>Y9#{%)K1apVJ*ooNz9qz&EPH4Hl>F>CAB0mGpBe;?G$eYkThfY
z6mO9pj)MH+;>@I+R4^w$sU$HoFEwR~H$x9=W=U!;NO=!yNoGk->XaVV@YK|7!zrc3
zJ?zE#rA5i9AX$iE>{XcsiD{WRsZ*Q|2{1AQcr&wzfL*dMMk<)=2z!7TGXnz%3o$S-
zBo`YR80%-`=cbnD7iAmj6;vuIDe-c-RwU*Y<fJMTmz1WY=9MTU=clCVDTHKX7At^E
zR>&-#{Lo#ZULmb0KUbk7BNZ%KtdN-p=49p-mlTyIm*f}ereqeSf*6&&TwsG!QqvTQ
zQp-|{ic{lrGK)(zaw|cUrj-IOmx6+V0$34<;htHRnx~Mc0FqTuhp19lC`v6UEy^o~
ziq|V7=jRn?7MEn^r7Ps8f$c6%%uQ8D%}LEo%_{*r2x^-`eo;zlk)DF9Ub-H{DyRg+
z8PSG1Mmol^nhM1k`K382P>U3zjdhH43}Y2E5@B{~LUn^2Di3zDvO<Wzi@%kEp+Zh(
zUTU#IZf0?DW**3v$)?fr^`4-B1i3~btu!yWBr`uxAt^tlQc07Si<b)&9Em9@8i_hd
z$U&e0@qI~ozCvDUZc=Jdu|h&(f<j_mib6tCf<g&69$=<HU0j@*TaZ%;b9hNcYG#o_
zacQo#6~wo83NU*VY!wo<lMo?IiTg9t6yoFa5_41I;}vXe6_n!Ra}zW3;^UQ|wkj(G
z6y+!7q~<CZS}BAVWtOBWBqGA0JR>tX1LQ6RB%eZ@TTqmlSE7-o6j`5NTBJ~vT3nh_
z0*YcI1#JZ*1zUw`kZ+81j5KRO0SQVlFdaeQ)CEcri3-KJi8(poB&4LHfEwRMI>tJt
zI_9yOn!H@7S)|PU+rPa`3=AMFz+ef>BSonN`9&qhG|wU6ELED9S(09qn38IwhhQpX
z=7OA{5E_zNT;iNqoT{S`1a_2;LPlmvDlZqNRv)M_@foQ(1*t{FMldr{%Mx=+6H8L#
zA>N9IW^_=ROwLIxE>-}SFB)*GG_82KrmtgW)RKn84=CZ5q!yRNgW{n$H75<Mz@C*+
zor#xgx+4pt96Km>G$9;yZ;-*fTu={y3mslAh(!=D7;8XikTIax1Q&A(whBtl!3u^3
zhER_x7%TB|O~1m*=moMxfsIj08DublR5*F|5y^Cj@x~gM@jbD@_(CctBLfJF!HYyl
zxERNSV}Z0tcO|`8ga%ArX>LKKLSnH(UIFi9c_TSS-id~CS@3Wxs7%XD&q*zT3xLuF
zC>Fqh07)E~dBp{($p|I+#c<V$Md<~JMa8Lbf#OPpWJzXjD(}P%HjKOz-?*}X44lli
zUw3i>qYfkQ<Q_(OW?nAd$sZV{!So46buj&bQGt<nvH+73m}ku73DFpTKoY{JIWU)r
zmus^0!5Nd!GmApR&K^`^2N_?SnmW0@PjWK<Au&eY$u=w|jJ%TzSn|N6_~frF{!msl
zYao<$m^B#6(q)SSn*nlDA6pkA?_>#fc}CvJHG&e8{n;gCAZZkojtWati@{~CtwLf^
zda+(1@8m{y8Ajg8@7YBsA2_NnfKZy6Q=DoAl28Tv#!g&vvcs_qkoyIjBqwj+=!CdG
z;5f*=ft<38yp!`dB|!4?kIO;i4j#YH$UFI?rRd}ZT!NDqaIsE)FUmW49~Unx$j7{s
z&vL0U@=pH5rOe1XS(IBI>P}^lLtz=SBBwkvFTI$TYjVL!P+pom^Q0`ujvXg;8F?ok
z<OYlJp8|{Now8u$oovrz#K=3j5KJ=iN>AR$Bh1J<`67=2BkyEJUVX5f_+)opOE61#
z^4Zh6lMnD}b3lBq%scrbuPuUO!UuBV0zQe!o6g7!LDhgA2o0YFd~%#nVP3AuTxUVy
z{)A5n<i-Fnr;tZ%vJF4Tltg|Ju$^L)yZJ@IEMdN+qSVA}gnof@@*tgt=Tt#d#5sL-
zsNCfB0y2|Vos$6xpEw8cq3~qB^Cn1^d!Gku&N(lIB-npmO$}-W*xAJjwhBhZCdfhs
ziN(dBq*Eg($;dl-s-OnQ68Q^YyKFB=fq0P@<Rl<kK!!l-79|BxUBk;Yx&MMQBk$xK
zAyATCASB1gJ9(c_5hL$p17T3g@)5Su0Y^8ubpUCxKx;6Nt<YM(v>-7#Tcc3Zig)sA
zVIM}`$=`)tpf+YG*ec`|=p_~>78NB{YGi2gP7V}NhNQ4xmyAKqo9Hb*Sz)aph`B3B
z6PmjIiEMzTzW1W6lk;ZtOpd$41{VQEg@OUd!!lwnjJ%T*#DW-kCvOr{V&t7{X`nE<
z^{NoaxS3a##33FB1stRl)B~5syp!$3ofvs1H;8M26gpl5>HjMZDvBpRye4c7(Us?;
zV5?wZ0O6Fx7ndX!l_=OMfQlDA5TT(7kt$BkFG>Yf<h7ufm?!~C$tNXD8F?rFlQ3rF
zoop!S2(m@=2FOZ7N%6_~Hxxkvg6_gRpdt_4>HuYvjW<BKNnZ+_fd1T2goKmgO+yig
zMWFaoRwz`kRVdWZ<ej`*N)lvD<4sWHaY~y)!yKdvgp>30N;30GQ+c^28<y!yAsYcn
z1jVJ9C8?UcT$6onNrMc^yk#qktf~+aSRm`5p~f#`0aEz)mWm-lVWA$Zv9DmO01YjW
z{e^n4c0X8r@+=uWBo`_xz)F0*kW}8u_hdlDrH`x?H2EtiC``UDYcP4AtT-d@<g2o=
z(EJNBxC)vLCOgT23X*&|KStik8x@2nUy$SAMYv0umuvE}ySY#~^?Mpn);xJ(9%NNu
zt>@%9q0+D9LHT>5g81b7_hy5|!oj5QWNAfE%z`XRQ;Y|TiL;gzRa${87oYq=g=O-Y
z2cRSf()&S$V=~`Eh?yWcj>+Z^rO>SfNw7?=co+aNZ1RJLpz;`^on>-v68q%-I1Z>B
z$iUnrJ~&U91(by+LllFu0)!<r`L8mlx`FbTQ#E0(6P{f67~+1AwUX#=5uOb5xa4FB
zZ62sSm90Wh)&f--OfCDKXkZEkJZGEC{j>+HPk3^`b2cs|TO|c8g*+dyTZAW{dn$*j
zi&s60k#};sx*w|WesvY7Atw~XCi7_Mh#^HAC?P07%W+VY1U#3XT%`fZKXWy#K#BD1
zb2$+8_Bl8=t(yjl8GB77)C4kV`fQM*xzoib%WHLmn7%VWxd~yl!YfFa3r~iH1;|fQ
zlgqR<P>q_rZzib71i9tqE07r=mb{Jt*bCy5BVWTj6Y!jky|@_MXPP|kwGJecpL-3e
zd^YHcOn#{&1uX<wbwNp17fMP^&ewGSSLYja%^7(oKi37NK{q`)B}Ay8WJOr6<mKX>
z+^h$(eYqaEZu|Y#45>7Llm(Cq0Tif{*M1P6tgo*LaaaF4O;D)pdZz-S9=x*zQPS^0
z6$PUK$jOj$*wO&1e&KsXXqlUvnO9np3MzS1O7!*340tEcG5~4YVc-lg+Wtcpgt7Jm
zs4=w1(1DS6@*hKaPDqUdayG2p7zA(Up|=UMiu3c}lKF@>U{aM4T(}@JIXef@R0OHw
z<(mBWqXWn<_MgNi|1px1Lj+=)UOA|DrvWlLzPO|a6jPH8jU|;KN{UjUqZH5*9~5S}
z3bvp|4=>l`)1PGE)&4qrX=sW~Q?OOY)r*f$%gjlQkLTr@EdLqY>UsFZVRC{AHz!0K
zR1`n_A~M;*#0ccFd!OYePcjh(F$=zkO|CZ;n0&^B8^k^EMPl*?6BcOtgJc4($pv4G
zCMTE(a_hwxB$i}E8^rQ*O+N5N781tTY*LvV_nk)+;wxyYy(+T+<QCq^jAmeqCw>)$
zHY=eDGV^&STbLO#@=ne)1I1>$nGs5%UX@t@4L(iY$>+_?LC)>@2F{6wkAx@h{{|{t
zL9NPH-$3nc5KG~^H?(H|VJ-|#;5-%}3v4a48F?pXSm=Yy(D?za4Fi8DGJ;IjnmqT1
z2uKF2>E)KZ5Hq=y6(9**6VjMf_^!uWl3x%HYO*AzgG$GbmY^mC$RP{3SSI^fNk9uB
zNU%;Wu+jn9^3Y9q@&+rAp?j@Vp#waK_7G&)3X*O?-O1?(*%@_3ZT<4|QenNoJZM{D
z^1<UqlMAeUps5MsI7IG)@@<v0@-%rT-?0XVT<ir2&iItXl0;CCZ}OSn#^6x$x6uZ<
zO#cta{rhde&Mb5np8Vg2pOJSmpKS~y@8m37OEHkMK#3GJ*-kF}3n~(B+3GR!PUf%!
z6<9WQptfnAT{@`7wew))oh)mw2C{<dKd6V0Xb*12d`J?RT>l?bEu6Fmb*R4DD}tRP
z?x3##vnn%9A-}YwptK}DC9_B&vseKX5>~uiQ%x8Z!0B;n1fvavF@sT_Qyn~{s2<BZ
z`IUnQSa>=gBcmUvV;slG2ucO(9Ko^fs2~ip0vb@@O7Op<FG#Hc6Qd}Ic4uNVftXUy
z#ApUeElwbZ{&oU+Rnr-iwxXP6p+z7(5<%k;Aj5gNcsKVrKR7VGmz8lHBky!;HbyC?
zM9b;nY>YeVjnhmG%nS|Sz|6wH*do~=&D_My)ZEzI#5B#!(9FaPL|d3yf>>szW~r7Y
zCP@Yc7Up2u1f<8z!XU-M%o4=5fYK>w;z=NJ6LYv#AjWhVc1BSR^W;QR5D$XQP0fvs
zQq_66Kx0?7O3=XpC0;Jx>0#`Q&Wya%XR<S@GxAP9z|N>&4;mq2WD;S(J&Xz-Uu9rm
z*wzSQfyXyc22}&RQT3yaF@f|z@V3S@W{d%+`bu=|=tC9|%^>Ro+0eAZ2QUJ>S=m79
zm>HNE4lyt=yy9nMU|^WK1tiA6z%ZqU8!3xV=?QSQiU}=FEh>&FsV~h)jd96OE(MKZ
z7RP{^px{xF_=2L$vc!_qm{iDUaWRY$pOT-PomvzF9aJig0SCyG9^Tyil+v8kcu280
zrL#u@(!Yo=O3g_u$t+8a&&f|t1a;S^^awztKm#yRi7Bai1(j2JSU|BgrH2(_TxoHs
F9srjjp1lA7

diff --git a/examples/example_docker/students/cs103/report3_grade.py b/examples/example_docker/students/cs103/report3_grade.py
index 03baa4e..af156c0 100644
--- a/examples/example_docker/students/cs103/report3_grade.py
+++ b/examples/example_docker/students/cs103/report3_grade.py
@@ -1,6 +1,4 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
+
 import numpy as np
 from tabulate import tabulate
 from datetime import datetime
@@ -431,9 +429,9 @@ def source_instantiate(name, report1_source, payload):
 
 
 report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n    """ The first question for week 1. """\n    def test_add(self):\n        from cs103.homework1 import add\n        self.assertEqualC(add(2,2))\n        self.assertEqualC(add(-100, 5))\n\n\nimport cs103\nclass Report3(Report):\n    title = "CS 101 Report 3"\n    questions = [(Week1, 20)]  # Include a single question for 10 credits.\n    pack_imports = [cs103]'
-report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f5061000000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b86944700000000000000008c0474696d6594473f7ca5000000000075732e'
+report1_payload = '800495a9000000000000007d948c055765656b31947d94288c055765656b31948c08746573745f616464944b0087944b04680368044b0187944aa1ffffff6803680486948c057469746c6594869468046803680486948c0474696d65948694473f7198800000000068038c0f746573745f6164645f68696464656e944b0087944b046803680d869468088694680d6803680d8694680b8694473f032000000000008c0474696d6594473f4186000000000075732e'
 name="Report3"
 
 report = source_instantiate(name, report1_source, report1_payload)
 output_dir = os.path.dirname(__file__)
-gather_upload_to_campusnet(report, output_dir)
+gather_upload_to_campusnet(report, output_dir)
\ No newline at end of file
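
The `-`/`+` pair above swaps in a regenerated `report1_payload`. Per `source_instantiate` in the embedded source (which does `pickle.loads(bytes.fromhex(payload))` after exec'ing `report1_source`), this string is simply a hex-encoded pickle of the per-question cache the report is instantiated with. A minimal sketch of that encoding round-trip, with a hypothetical cache dictionary standing in for the real one:

    import pickle

    # Sketch of the payload round-trip used by source_instantiate above: the
    # cache dict is pickled and hex-encoded so it can be pasted into the
    # generated _grade.py file as an ordinary string literal.
    cache = {"Week1": {("Week1", "test_add"): 4, "time": 0.001}}  # hypothetical contents
    payload_hex = pickle.dumps(cache).hex()        # what report1_payload holds
    restored = pickle.loads(bytes.fromhex(payload_hex))
    assert restored == cache
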
diff --git a/examples/example_docker/students/cs103/unitgrade/Week1.pkl b/examples/example_docker/students/cs103/unitgrade/Week1.pkl
index 798c5ea433edaa5ae586e2d0aa2afaa78549e7cd..fc298168d395c432420ad99533ade24705a6e589 100644
GIT binary patch
delta 15
TcmYdDm=MNg&%A&E1WNS)A6^4C

delta 15
QcmYdDm=MOr00pIb02j>yQ~&?~
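
The binary delta above updates `unitgrade/Week1.pkl`, the per-question cache file that `_save_cache` in the embedded source writes into a `unitgrade/` folder next to the test module and that `_load_cache` reads back on the student's machine. A sketch of that save/load cycle (path and cache contents are illustrative, not the real recorded outcomes):

    import os
    import pickle

    # Mirrors _save_cache/_load_cache from the embedded source: _cache2 is a
    # plain dict of recorded values, pickled to <module dir>/unitgrade/<class>.pkl.
    cfile = os.path.join("unitgrade", "Week1.pkl")
    os.makedirs(os.path.dirname(cfile), exist_ok=True)
    cache2 = {(("Week1", "test_add"), "time"): 0.001}  # hypothetical entry
    with open(cfile, "wb") as f:
        pickle.dump(cache2, f)
    with open(cfile, "rb") as f:
        assert pickle.load(f) == cache2
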

diff --git a/examples/example_simplest/instructor/cs101/__pycache__/homework1.cpython-39.pyc b/examples/example_simplest/instructor/cs101/__pycache__/homework1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..797cf46e4a10aa111b1af224256cb5ec15734be5
GIT binary patch
literal 835
zcmYe~<>g{vU|@)L(@eBuVqka-;vi#I1_lNP1_p*=Ck6(F6owSW7KSK>6s8pB7KSLs
zRHhV`Y^EZ+ROS@cRK^sx6t-rj*$i`;ni&}xQkhfOQyJ13QaBbdHZwLeF*2lZ1~X`K
zB{L!!4`MSjFfcfSY~W#FU?^cIVXR?jW^85*X3%7;TEwNGprGKMS(ciokf@N8SzMx^
zo?8i`)fI|TOG=CKieci(`FX{e#U+_}=?eL23MCn-3dM=JsS2q%sky0nCB+Jvc?w0T
zWvNBQsS5c;DXB$z3a)zTdSLUR;_)DJHKGl5jC71+H5H08@=J44pw=iv8|xV97{)4S
zB*JXeglN`ey2S$0c#8vSOo}FB6dS~mD;bJFzWWuYpOK%Ns$Wu?k*e>KpIi!dk$!1j
zW=VQcVoGX!K~ZK|Vo9ogYDHphK~8Ehj1ga)3FepRXXX``6qP2I<QM5D7aJNFf(<Os
zFUmI5E2zB11GO6L15lV2Gl2q`wFne@$zV2!&%nR{VuLU^z&RKg7-|@57{wXDft>^m
z=#uh$g}l<-q|~Bfg@nWeg~YrRg@mL8g_4ZSVuiHQyyTM1{5)9jfL&M#3#F2b)XXA<
z;?i6zaKPFrB&MWjB<dt-D%dI{Y9}dxgF}<)7GvTq#w0KYLa;C}Fx+BJOi2Mb3F3I>
zA`tr)M|^y4VrE`^e3f`)erb_HQEG8%P6;TAjTE#Mj1+7YUV{8nB^CsZTu}5UDir4?
z=H!5rizZ7ED1E;KMZ-%_XuM=+U|{gmWB~^a+={%!+|>B^TdV~|nRz7;CxLW8SYXF-
Z*yQG?l;)(`frA(1G!AwSRt_c+MgYT`*53dC

literal 0
HcmV?d00001
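
For `QPrintItem`-style tests, the embedded source compares printed output by pulling every numeric literal out of the captured stdout (`extract_numbers`, built on a verbose regex, after `rm_progress_bar` strips tqdm lines). The pattern demonstrated standalone, with the same int/float conversion rule:

    import re

    # The numeric-literal pattern from extract_numbers in the embedded source;
    # tokens containing '.' or an exponent become floats, the rest stay ints.
    numeric_const_pattern = r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
    rx = re.compile(numeric_const_pattern, re.VERBOSE)
    found = rx.findall("loss 0.25 after 10 steps, lr 1e-3")
    nums = [float(a) if ('.' in a or 'e' in a) else int(a) for a in found]
    assert nums == [0.25, 10, 0.001]
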

diff --git a/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-39.pyc b/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86b610bc46f684a93398565dc2e7f5f7b08ee22a
GIT binary patch
literal 1221
zcmYe~<>g{vU|`tCu9bM6iGkrUh=Yt-7#J8F7#J9e3m6y}QW#Pga~N_NqZk=MY^EHh
zT;?cdFq=7tC6_gdHJ2@l4a{fBVUJ>WXGmd9VQXPXVM}Gt;%H`$;!I&kVaef&;&Nw5
zVNc;`VMyUfWo%}S;!fpB;ml?#%1LET<!)wZ2AR(i%%I8j666+5##?MbsRj8(C7O)4
z#8b->b4n9SQsaxjLh;2Vr75X-C7MjPc#2ZXQj3aH<8v~LOKveIrlj~KgY+X~CYXO@
z7#J8*8KM|d7^0X`7*m*97^0X{m{V9<7@}BGSc4ff*>16hr>15bCNm=G2eDZh7#N&E
zL88IHz)-`mfT4sjg)xP(nJI;7Arm7^ggJ$!nJI-8B$C1aGS2TM$nKY*0L`59|NsA&
zAagPo{{R19lj#<BVsUY5QHg6|X<|+hNMa@9EtcZcoV1k;MZ63Q48LOZGxBp&^-D@K
zQuST(lS^|`^Gb^KOY<^I(u)#PQsWDXGRqQ6QuR|S5_1c3Qj1}X_~J}3zeGPXuehYB
zG`S?dNI$vQ(7;e15`Kny1(mlrN>YnU;uBL+*g^gRIgx=;h_Q+f8~}PSN!%d;3QR~y
zq%)*2E@EtEOlL@8TEqzVJ+q%C^Gi_R6@hGc$;`mOpvhb$$iTn=7AO(`2{09b(qa+F
z`XX+S6v*db?-%hgFfiQW0Xqa5$sh&Apb%wXtP(<UCsf)`ll2xye0*MFZfbn|Ew1?Z
z-29Z%91xo)J{}ax5Sb!=kUc^m0$~}L1+uY-je&sygo{CL;$Y-p0gK>HR0=4GDvCLk
zC50)CDV;HjHI*%$F^WBfIha9{<rX_6H5*p(I|nNm8W<`-_zH$EK>^^W$x$Q>a*+s#
z5C!?1wIs77ClzdY6lY;+YH>+seqQk{R&Yc{@f0K`XUAvef^tYPB)q_0025$;h%qoQ
zfP7jE@(c$f3nLd}5l9RaM7fEXdGYa@yiua?)MTWGU>1SGI7${FfRKyNNX;ooEh;uD
z0>x((KiD;TpmbWEUzBYKiAYdlj^Y4m2gP0#M@VXMiF0Bx#4|;bATNSk0`W1}#RvlA
i_FEh_kf^f*C7)tYLg8WLVH9BGU=rfsU=(2GU<Lr@loGxG

literal 0
HcmV?d00001
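
In the cached variant of these tests (the `Week1(UTestCase)` class in the cs103 files above), comparisons go through `assertEqualC`: the computed value is checked against the instructor-recorded cache entry, and re-recorded into the write cache for the next cache build; keys missing from the cache only produce a warning and pass trivially. A simplified, self-contained sketch of that contract (plain dicts stand in for the class-level `_cache`/`_cache2`):

    # Simplified from UTestCase.assertEqualC in the embedded source.
    _cache, _cache2 = {}, {}

    def assert_equal_c(key, first):
        if key not in _cache:
            print("Warning, framework missing key", key)
        assert first == _cache.get(key, first)  # unseen keys fall back to the computed value
        _cache2[key] = first                    # re-record for the next cache build

    _cache[("Week1", "test_add", 0)] = 4        # recorded on the instructor's machine
    assert_equal_c(("Week1", "test_add", 0), 2 + 2)  # student run: 4 == 4 passes
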

diff --git a/examples/example_simplest/instructor/cs101/report1.py b/examples/example_simplest/instructor/cs101/report1.py
index 43d5b78..ea4f3b2 100644
--- a/examples/example_simplest/instructor/cs101/report1.py
+++ b/examples/example_simplest/instructor/cs101/report1.py
@@ -1,6 +1,6 @@
 from unitgrade2.unitgrade2 import Report
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from homework1 import reverse_list, add
+from cs101.homework1 import reverse_list, add
 import unittest
 
 class Week1(unittest.TestCase):
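
The hunk above replaces the bare `from homework1 import ...` with a package-qualified import, so `report1.py` resolves both when imported as `cs101.report1` and when the grade script packs the `cs101` package for upload. Per the usage text in the embedded source, such a report is then run from the directory containing the package, e.g. `python -m cs101.report1`. A sketch of the overall file shape this hunk belongs to (the `Report1` class name, title, and weight are assumed, following the `Report3` example embedded above):

    from unitgrade2.unitgrade2 import Report
    from unitgrade2.unitgrade_helpers2 import evaluate_report_student
    from cs101.homework1 import reverse_list, add
    import unittest
    import cs101

    class Week1(unittest.TestCase):
        def test_add(self):
            self.assertEqual(add(2, 2), 4)  # hypothetical expected value

    class Report1(Report):
        title = "CS 101 Report 1"           # assumed title
        questions = [(Week1, 10)]           # assumed weight
        pack_imports = [cs101]              # include the package source in the .token

    if __name__ == "__main__":
        evaluate_report_student(Report1())
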
diff --git a/examples/example_simplest/instructor/cs101/report1_grade.py b/examples/example_simplest/instructor/cs101/report1_grade.py
index 4639057..d844649 100644
--- a/examples/example_simplest/instructor/cs101/report1_grade.py
+++ b/examples/example_simplest/instructor/cs101/report1_grade.py
@@ -428,7 +428,7 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
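# A stripped-down version of the UTextResult/UTextTestRunner pairing above:
# a TextTestResult subclass records successes and prints one PASS line per
# test, and the runner routes result output to stdout via _WritelnDecorator.
# The Demo test case is hypothetical.
import sys, unittest
from unittest.runner import _WritelnDecorator

class PassFailResult(unittest.TextTestResult):
    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        self.successes = []

    def addSuccess(self, test):
        self.successes.append(test)
        self.stream.writeln(self.getDescription(test) + " ... PASS")

class StdoutRunner(unittest.TextTestRunner):
    def _makeResult(self):
        return self.resultclass(_WritelnDecorator(sys.stdout),
                                self.descriptions, self.verbosity)

class Demo(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(1 + 1, 2)

suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
StdoutRunner(resultclass=PassFailResult, verbosity=1).run(suite)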
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
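# The essence of assertEqualC in the UTestCase above: check a value against
# a previously recorded answer when one exists, otherwise accept and record
# the fresh value so the next run can compare against it. A plain-dict sketch
# (the real class keeps a read-only cache and a separate write cache):
class RecordingChecker:
    def __init__(self, recorded=None):
        self.recorded = recorded or {}   # loaded from disk in the real class
        self.computed = {}               # what gets saved for the next run

    def assert_equal_cached(self, key, value):
        expected = self.recorded.get(key, value)  # missing key -> trust value
        assert value == expected, f"{key}: {value!r} != recorded {expected!r}"
        self.computed[key] = value

chk = RecordingChecker(recorded={("Week1", "test_add", 0): 4})
chk.assert_equal_cached(("Week1", "test_add", 0), 2 + 2)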
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
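# The makeRegisteringDecorator/methodsWithDecorator pattern above, in
# isolation: the wrapped decorator tags whatever it returns, so decorated
# methods can later be found by scanning a class dict. Names are illustrative.
def hide(func):
    return func

def make_registering(deco):
    def new_deco(func):
        out = deco(func)
        out.decorator = new_deco  # the tag that the later scan looks for
        return out
    new_deco.__name__ = deco.__name__
    return new_deco

hide = make_registering(hide)

class Quiz:
    @hide
    def secret_question(self):
        pass
    def public_question(self):
        pass

hidden = [f for f in Quiz.__dict__.values() if getattr(f, "decorator", None) is hide]
assert [f.__name__ for f in hidden] == ["secret_question"]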
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
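# The SequentialTestLoader defined above, runnable on its own: unittest sorts
# test names alphabetically by default, so the loader re-sorts them by their
# position in the class __dict__, i.e. by definition order (Python 3.7+).
import unittest

class SequentialTestLoader(unittest.TestLoader):
    def getTestCaseNames(self, testCaseClass):
        names = super().getTestCaseNames(testCaseClass)
        order = list(testCaseClass.__dict__.keys())
        return sorted(names, key=order.index)

class Ordered(unittest.TestCase):
    def test_b_runs_first(self): pass
    def test_a_runs_second(self): pass

assert SequentialTestLoader().getTestCaseNames(Ordered) == \
       ["test_b_runs_first", "test_a_runs_second"]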
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
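# The scoring rule above, worked through: possible = tests run, obtained =
# tests run minus errors (note that only errors, not failures, are subtracted
# in the version above), and the question score is the weight times the
# truncated pass fraction, which makes credit effectively all-or-nothing:
w, possible, errors = 10, 10, 1
obtained = w * int((possible - errors) * 1.0 / possible)
assert obtained == 0                       # int(9/10) == 0: one error forfeits all credit
assert w * int(possible / possible) == 10  # full credit only when every test passes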
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
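# The core of gather_imports above: walk a package directory and pack every
# .py file into an in-memory zip archive. The package path "mypkg" is a
# placeholder.
import io, os, zipfile

def zip_package_sources(top_package):
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        for root, _dirs, files in os.walk(top_package):
            for name in files:
                if name.endswith(".py"):
                    path = os.path.join(root, name)
                    # store paths relative to the package's parent directory
                    zf.write(path, os.path.relpath(path, os.path.dirname(top_package)))
    return buf.getvalue()

# usage, assuming a local package ./mypkg: blob = zip_package_sources("mypkg")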
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
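# How the .token hand-in produced above round-trips: the results dict is
# pickled into a file whose name encodes the obtained/possible score (compare
# Report3_handin_20_of_20.token elsewhere in this patch). Values here are
# illustrative.
import pickle

results = {"total": (18, 20), "details": {}}
obtained, possible = results["total"]
token = "Report1_handin_%i_of_%i.token" % (obtained, possible)
with open(token, "wb") as f:
    pickle.dump(results, f)
with open(token, "rb") as f:
    assert pickle.load(f)["total"] == (18, 20)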
+report1_source = 'import os\n\n# DON\'T import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because importing at the top would break the __version__ tag.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it; otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._stderr = sys.stderr\n            sys.stderr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.stderr = self._stderr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
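# A minimal version of the Capturing context manager defined above: replace
# sys.stdout with a StringIO for the duration of the block, then expose the
# captured lines through the list interface. The unmute variant in the source
# tees output through the Logger class instead of swallowing it.
import sys
from io import StringIO

class Capture(list):
    def __enter__(self):
        self._stdout, self._buf = sys.stdout, StringIO()
        sys.stdout = self._buf
        return self

    def __exit__(self, *exc):
        sys.stdout = self._stdout
        self.extend(self._buf.getvalue().splitlines())

with Capture() as out:
    print("hello")
assert out == ["hello"]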
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
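# The assertL2Relative comparison above, numerically: the element-wise
# absolute difference is normalized by |computed + expected| (with 1e-8
# guarding against division by zero) and the test fails if any entry exceeds
# the tolerance.
import numpy as np

def l2_relative_ok(computed, expected, tol=1e-6):
    c, e = np.asarray(computed, dtype=float), np.asarray(expected, dtype=float)
    diff = np.abs(c - e) / (1e-8 + np.abs(c + e))
    return bool(np.max(diff) <= tol)

assert l2_relative_ok([1.0000001, 2.0], [1.0, 2.0])
assert not l2_relative_ok([1.1, 2.0], [1.0, 2.0])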
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
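# The OrderedClassMembers metaclass above, reduced to its core: __prepare__
# hands the class body an ordered namespace, and __new__ records the member
# names in definition order under __ordered__ (this is what lets QuestionGroup
# run its items in the order they were written).
import collections

class OrderedMembers(type):
    @classmethod
    def __prepare__(mcls, name, bases):
        return collections.OrderedDict()

    def __new__(mcls, name, bases, classdict):
        classdict["__ordered__"] = [k for k in classdict
                                    if k not in ("__module__", "__qualname__")]
        return super().__new__(mcls, name, bases, classdict)

class Q(metaclass=OrderedMembers):
    def second(self): pass
    def first(self): pass

assert Q.__ordered__ == ["second", "first"]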
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
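# The extract_numbers helper above, exercised on its own: a re.VERBOSE
# pattern matches integers, decimals and scientific notation in captured
# terminal text, and each match is cast back to int or float.
import re

rx = re.compile(r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ )?',
                re.VERBOSE)

def extract_numbers(txt):
    hits = rx.findall(txt)
    return [float(h) if ("." in h or "e" in h or "E" in h) else int(h) for h in hits]

assert extract_numbers("loss 0.25 after 10 steps, lr=1e-3") == [0.25, 10, 1e-3]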
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for Python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a Python package, then change directory to `Documents/` and run:\n\n> python -m course_package.report1\n\nSee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file " + os.path.abspath(report.computed_answers_file) + " does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        print(tabulate(table_data))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h = [(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2]\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
 report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e'
 name="Report1"
 
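The generated grade script above is deliberately self-contained: `report1_source` embeds the framework and report code as a single string, `report1_payload` carries the expected results as a hex-encoded pickle, and `source_instantiate` recombines the two at run time. A minimal sketch of the payload round-trip, assuming the payload was produced by the obvious inverse of the decoding shown above (the `expected` dictionary is a stand-in, not the decoded contents of the real payload):

    import pickle

    # Pack (instructor side): pickle the expected answers and hex-encode them
    # so the bytes can live inside a plain source string.
    expected = {'Week1': 'stand-in for the cached test answers'}
    payload = pickle.dumps(expected).hex()   # a hex string like report1_payload

    # Unpack (student side): exactly the decoding done by source_instantiate.
    assert pickle.loads(bytes.fromhex(payload)) == expected
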
diff --git a/examples/example_simplest/students/cs101/Report1_handin_10_of_10.token b/examples/example_simplest/students/cs101/Report1_handin_10_of_10.token
new file mode 100644
index 0000000000000000000000000000000000000000..4e5f827930cba675185c66c49d883c1a2cd35046
GIT binary patch
[binary literal, 70101 bytes, omitted: base85-encoded contents of Report1_handin_10_of_10.token, a pickled results dictionary; nothing human-readable survives in the blob]

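The .token file added above is opaque in patch form, but per `gather_upload_to_campusnet` it is simply the pickled `results` dictionary written with `pickle.dump`. A sketch for inspecting a token offline; the file name is the one from this commit, and the keys follow the code shown earlier:

    import pickle

    with open("Report1_handin_10_of_10.token", "rb") as f:
        results = pickle.load(f)

    print(results["total"])    # (points obtained, points possible), here (10, 10)
    print(results["details"])  # per-question breakdown from evaluate_report
    print(results["sources"])  # zipped source of the packed imports
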
diff --git a/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-39.pyc b/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..797cf46e4a10aa111b1af224256cb5ec15734be5
GIT binary patch
[binary literal, 835 bytes, omitted: base85-encoded compiled bytecode for homework1.cpython-39.pyc]

diff --git a/examples/example_simplest/students/cs101/__pycache__/report1.cpython-39.pyc b/examples/example_simplest/students/cs101/__pycache__/report1.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86b610bc46f684a93398565dc2e7f5f7b08ee22a
GIT binary patch
literal 1221
zcmYe~<>g{vU|`tCu9bM6iGkrUh=Yt-7#J8F7#J9e3m6y}QW#Pga~N_NqZk=MY^EHh
zT;?cdFq=7tC6_gdHJ2@l4a{fBVUJ>WXGmd9VQXPXVM}Gt;%H`$;!I&kVaef&;&Nw5
zVNc;`VMyUfWo%}S;!fpB;ml?#%1LET<!)wZ2AR(i%%I8j666+5##?MbsRj8(C7O)4
z#8b->b4n9SQsaxjLh;2Vr75X-C7MjPc#2ZXQj3aH<8v~LOKveIrlj~KgY+X~CYXO@
z7#J8*8KM|d7^0X`7*m*97^0X{m{V9<7@}BGSc4ff*>16hr>15bCNm=G2eDZh7#N&E
zL88IHz)-`mfT4sjg)xP(nJI;7Arm7^ggJ$!nJI-8B$C1aGS2TM$nKY*0L`59|NsA&
zAagPo{{R19lj#<BVsUY5QHg6|X<|+hNMa@9EtcZcoV1k;MZ63Q48LOZGxBp&^-D@K
zQuST(lS^|`^Gb^KOY<^I(u)#PQsWDXGRqQ6QuR|S5_1c3Qj1}X_~J}3zeGPXuehYB
zG`S?dNI$vQ(7;e15`Kny1(mlrN>YnU;uBL+*g^gRIgx=;h_Q+f8~}PSN!%d;3QR~y
zq%)*2E@EtEOlL@8TEqzVJ+q%C^Gi_R6@hGc$;`mOpvhb$$iTn=7AO(`2{09b(qa+F
z`XX+S6v*db?-%hgFfiQW0Xqa5$sh&Apb%wXtP(<UCsf)`ll2xye0*MFZfbn|Ew1?Z
z-29Z%91xo)J{}ax5Sb!=kUc^m0$~}L1+uY-je&sygo{CL;$Y-p0gK>HR0=4GDvCLk
zC50)CDV;HjHI*%$F^WBfIha9{<rX_6H5*p(I|nNm8W<`-_zH$EK>^^W$x$Q>a*+s#
z5C!?1wIs77ClzdY6lY;+YH>+seqQk{R&Yc{@f0K`XUAvef^tYPB)q_0025$;h%qoQ
zfP7jE@(c$f3nLd}5l9RaM7fEXdGYa@yiua?)MTWGU>1SGI7${FfRKyNNX;ooEh;uD
z0>x((KiD;TpmbWEUzBYKiAYdlj^Y4m2gP0#M@VXMiF0Bx#4|;bATNSk0`W1}#RvlA
i_FEh_kf^f*C7)tYLg8WLVH9BGU=rfsU=(2GU<Lr@loGxG

literal 0
HcmV?d00001

diff --git a/examples/example_simplest/students/cs101/deploy.py b/examples/example_simplest/students/cs101/deploy.py
new file mode 100644
index 0000000..3e9682d
--- /dev/null
+++ b/examples/example_simplest/students/cs101/deploy.py
@@ -0,0 +1,16 @@
+from report1 import Report1
+from unitgrade_private2.hidden_create_files import setup_grade_file_report
+from snipper import snip_dir
+import shutil
+
+if __name__ == "__main__":
+    setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
+
+    # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
+    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
+
+    # For convenience, copy the homework file to the other example courses.
+    for f in ['../../../example_framework/instructor/cs102/homework1.py', '../../../example_docker/instructor/cs103/homework1.py']:
+        shutil.copy('homework1.py', f)
+
+
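The deploy script above drives the whole release pipeline: setup_grade_file_report produces the self-contained report1_grade.py, and snip_dir then copies the instructor tree into the student directory, excluding caches, token files, and the deploy script itself. As a sketch of how the same recipe could serve a second course (the cs102 paths here are hypothetical; the snip_dir call mirrors the signature used above):

from snipper import snip_dir

if __name__ == "__main__":
    # Hypothetical second course; same exclusions as the cs101 deploy above.
    snip_dir.snip_dir(source_dir="../cs102",
                      dest_dir="../../students/cs102",
                      clean_destination_dir=True,
                      exclude=['__pycache__', '*.token', 'deploy.py'])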
diff --git a/examples/example_simplest/students/cs101/homework1.py b/examples/example_simplest/students/cs101/homework1.py
index 3543f1b..286b79f 100644
--- a/examples/example_simplest/students/cs101/homework1.py
+++ b/examples/example_simplest/students/cs101/homework1.py
@@ -1,19 +1,14 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
-def reverse_list(mylist): 
+def reverse_list(mylist): #!f
     """
     Given a list 'mylist', returns a list consisting of the same elements in reverse order. E.g.
     reverse_list([1,2,3]) should return [3,2,1] (as a list).
     """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return list(reversed(mylist))
 
-def add(a,b): 
+def add(a,b): #!f
     """ Given two numbers `a` and `b` this function should simply return their sum:
     > add(a,b) = a+b """
-    # TODO: 1 lines missing.
-    raise NotImplementedError("Implement function body")
+    return a+b
 
 if __name__ == "__main__":
     # Problem 1: Write a function which adds two numbers
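The #!f markers introduced above are snipper directives: when the student handout is generated, each marked function keeps its signature and docstring but has its body stripped and replaced by a TODO comment and a NotImplementedError, exactly as in the removed lines of this hunk. A minimal before/after sketch of that transformation:

# Instructor source, marked for body-stripping:
def add(a,b): #!f
    """ Given two numbers `a` and `b`, this function should simply return their sum:
    > add(a,b) = a+b """
    return a+b

# Generated student version (what the removed lines above contained):
def add(a,b):
    """ Given two numbers `a` and `b`, this function should simply return their sum:
    > add(a,b) = a+b """
    # TODO: 1 lines missing.
    raise NotImplementedError("Implement function body")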
diff --git a/examples/example_simplest/students/cs101/report1.py b/examples/example_simplest/students/cs101/report1.py
index 6c51d24..ea4f3b2 100644
--- a/examples/example_simplest/students/cs101/report1.py
+++ b/examples/example_simplest/students/cs101/report1.py
@@ -1,9 +1,6 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
 from unitgrade2.unitgrade2 import Report
 from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from homework1 import reverse_list, add
+from cs101.homework1 import reverse_list, add
 import unittest
 
 class Week1(unittest.TestCase):
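Qualifying the import as cs101.homework1 means the student report is meant to be run with the cs101 package on the path (the grade script's help text suggests python -m for exactly this layout). A small sketch of driving the evaluation programmatically via the evaluate_report_student helper imported above; it assumes Report1 is defined at the bottom of report1.py, as in the embedded source:

from cs101.report1 import Report1
from unitgrade2.unitgrade_helpers2 import evaluate_report_student

if __name__ == "__main__":
    # Prints per-question results and the provisional score table.
    evaluate_report_student(Report1())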
diff --git a/examples/example_simplest/students/cs101/report1_grade.py b/examples/example_simplest/students/cs101/report1_grade.py
index 4a5f73c..d844649 100644
--- a/examples/example_simplest/students/cs101/report1_grade.py
+++ b/examples/example_simplest/students/cs101/report1_grade.py
@@ -1,6 +1,4 @@
-"""
-Example student code. This file is automatically generated from the files in the instructor-directory
-"""
+
 import numpy as np
 from tabulate import tabulate
 from datetime import datetime
@@ -430,10 +428,10 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time()              - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n    return all\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar=None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.im_class):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        id = self.unique_cache_id()\n        if not self._cache_contains(id):\n            print("Warning, framework missing key", id)\n\n        self.assertEqual(first, self._cache_get(id, first), msg)\n        self._cache_put(id, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache != None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
See here:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        z = 234\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors)\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible )\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
+report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n    import compress_pickle\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    if verbose: print("Writing cache...", file_name)\n    with open(file_name, \'wb\', ) as f:\n        compress_pickle.dump(object, f, compression="lzma")\n    if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n    import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        try:\n            with open(file_name, \'rb\') as f:\n                return compress_pickle.load(f, compression="lzma")\n        except Exception as e:\n            print("Tried to load a bad pickle file at", file_name)\n            print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n            print(e)\n            # return pickle.load(f)\n    else:\n        return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push &&  pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x)  # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n    name = C.__class__.__name__\n    # base_dir = os.path.join(base_dir, name)\n    # if not os.path.isdir(base_dir):\n    #     os.makedirs(base_dir)\n    return base_dir, name\n\nclass Hidden:\n    def hide(self):\n        return True\n\nclass Logger(object):\n    def __init__(self, buffer):\n        self.terminal = sys.stdout\n        self.log = buffer\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\nclass Capturing(list):\n    def __init__(self, *args, unmute=False, **kwargs):\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True): # don\'t put arguments here.\n        self._stdout = sys.stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO() # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio    # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n    title = None\n    testfun = None\n    tol = 0\n    estimated_time = 0.42\n    _precomputed_payload = None\n    _computed_answer = None # Internal helper to later get results.\n    weight = 1 # the weight of the question.\n\n    def __init__(self, question=None, *args, 
**kwargs):\n        if self.tol > 0 and self.testfun is None:\n            self.testfun = self.assertL2Relative\n        elif self.testfun is None:\n            self.testfun = self.assertEqual\n\n        self.name = self.__class__.__name__\n        # self._correct_answer_payload = correct_answer_payload\n        self.question = question\n\n        super().__init__(*args, **kwargs)\n        if self.title is None:\n            self.title = self.name\n\n    def _safe_get_title(self):\n        if self._precomputed_title is not None:\n            return self._precomputed_title\n        return self.title\n\n    def assertNorm(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n        nrm = np.sqrt(np.sum( diff ** 2))\n\n        self.error_computed = nrm\n\n        if nrm > tol:\n            print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def assertL2(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        self.error_computed = np.max(diff)\n\n        if np.max(diff) > tol:\n            print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n    def assertL2Relative(self, computed, expected, tol=None):\n        if tol == None:\n            tol = self.tol\n        diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n        diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n        self.error_computed = np.max(np.abs(diff))\n        if np.sum(diff > tol) > 0:\n            print(f"Not equal within tolerance {tol}")\n            print(f"Element-wise differences {diff.tolist()}")\n            self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n    def precomputed_payload(self):\n        return self._precomputed_payload\n\n    def precompute_payload(self):\n        # Pre-compute resources to include in tests (useful for getting around rng).\n        pass\n\n    def compute_answer(self, unmute=False):\n        raise NotImplementedError("test code here")\n\n    def test(self, computed, expected):\n        self.testfun(computed, expected)\n\n    def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n        possible = 1\n        computed = None\n        def show_computed_(computed):\n            print(">>> Your output:")\n            print(computed)\n\n        def show_expected_(expected):\n            print(">>> Expected output (note: may have been processed; read text script):")\n            print(expected)\n\n        correct = self._correct_answer_payload\n        try:\n            if unmute: # Required to not mix together print stuff.\n                print("")\n            computed = self.compute_answer(unmute=unmute)\n        except Exception as e:\n            if not passall:\n                if not silent:\n                    print("\\n=================================================================================")\n                    
print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n                    show_expected_(correct)\n                    import traceback\n                    print(traceback.format_exc())\n                    print("=================================================================================")\n                return (0, possible)\n\n        if self._computed_answer is None:\n            self._computed_answer = computed\n\n        if show_expected or show_computed:\n            print("\\n")\n        if show_expected:\n            show_expected_(correct)\n        if show_computed:\n            show_computed_(computed)\n        try:\n            if not passall:\n                self.test(computed=computed, expected=correct)\n        except Exception as e:\n            if not silent:\n                print("\\n=================================================================================")\n                print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n                print(e)\n                show_computed_(computed)\n                show_expected_(correct)\n            return (0, possible)\n        return (1, possible)\n\n    def score(self):\n        try:\n            self.test()\n        except Exception as e:\n            return 0\n        return 1\n\nclass QPrintItem(QItem):\n    def compute_answer_print(self):\n        """\n        Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n        are send to process_output (see compute_answer below). In other words, the text generated is:\n\n        res = compute_Answer_print()\n        txt = (any terminal output generated above)\n        numbers = (any numbers found in terminal-output txt)\n\n        self.test(process_output(res, txt, numbers), <expected result>)\n\n        :return: Optional values for comparison\n        """\n        raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n    def process_output(self, res, txt, numbers):\n        return res\n\n    def compute_answer(self, unmute=False):\n        with Capturing(unmute=unmute) as output:\n            res = self.compute_answer_print()\n        s = "\\n".join(output)\n        s = rm_progress_bar(s) # Remove progress bar.\n        numbers = extract_numbers(s)\n        self._computed_answer = (res, s, numbers)\n        return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n    @classmethod\n    def __prepare__(self, name, bases):\n        return collections.OrderedDict()\n    def __new__(self, name, bases, classdict):\n        ks = list(classdict.keys())\n        for b in bases:\n            ks += b.__ordered__\n        classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n        return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n    title = "Untitled question"\n    partially_scored = False\n    t_init = 0  # Time spend on initialization (placeholder; set this externally).\n    estimated_time = 0.42\n    has_called_init_ = False\n    _name = None\n    _items = None\n\n    @property\n    def items(self):\n        if self._items == None:\n            self._items = []\n            members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n            for I in members:\n                self._items.append( I(question=self))\n        return self._items\n\n    @items.setter\n    def items(self, value):\n        self._items = value\n\n    @property\n    def name(self):\n        if self._name == None:\n            self._name = self.__class__.__name__\n        return self._name #\n\n    @name.setter\n    def name(self, val):\n        self._name = val\n\n    def init(self):\n        # Can be used to set resources relevant for this question instance.\n        pass\n\n    def init_all_item_questions(self):\n        for item in self.items:\n            if not item.question.has_called_init_:\n                item.question.init()\n                item.question.has_called_init_ = True\n\n\nclass Report():\n    title = "report title"\n    version = None\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    @classmethod\n    def reset(cls):\n        for (q,_) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n        # else:\n        #     if os.path.isfile(self.computed_answers_file):\n        #         self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n        #     else:\n        #         s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n        #         if strict:\n        #             raise Exception(s)\n        #         else:\n        #             print(s)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        import unittest\n        loader = unittest.TestLoader()\n        for q,_ in self.questions:\n            import time\n            start = time.time() # A good proxy for setup time is to time the full run of the suite.\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self):\n        self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                q._cache[\'time\'] = q.time\n                report_cache[q.__qualname__] = q._cache\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n\n            # for item in q.items:\n            #     if q.name not in payloads or item.name not in payloads[q.name]:\n            #         s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n            #         if strict:\n            #             raise Exception(s)\n            #         else:\n            #             print(s)\n            #     else:\n            #         item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n            #         item.estimated_time = payloads[q.name][item.name].get("time", 1)\n            #         q.estimated_time = payloads[q.name].get("time", 1)\n            #         if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n            #             item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n            #         try:\n            #             if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n            #                 item.title = payloads[q.name][item.name][\'title\']\n            #         except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n            #             pass\n            #             # print("bad", e)\n        # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n    # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct+1)\n            if i > 0 and l.find("|", i+1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n    # txt = rm_progress_bar(txt)\n    numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    matches = rx.findall(txt)\n    matches = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in matches]\n    if len(matches) > 500:\n        print(txt)\n        raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(matches))\n    return matches\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar"):\n        self.t = t\n        self._running = False\n        self.title = title\n        self.dt = 0.1\n        self.n = int(np.round(self.t / self.dt))\n        # self.pbar = tqdm.tqdm(total=self.n)\n        if start:\n            self.start()\n\n    def start(self):\n        self._running = True\n        self.thread = threading.Thread(target=self.run)\n        self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        self._running = False\n        self.thread.join()\n        if hasattr(self, \'pbar\') and self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        sys.stdout.flush()\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')  # , unit_scale=dt, unit=\'seconds\'):\n\n        for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n\n            time.sleep(self.dt)\n            self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n    pass\n\ndef instance_call_stack(instance):\n    s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n    return s\n\ndef get_class_that_defined_method(meth):\n    for cls in inspect.getmro(meth.__self__.__class__): # Python 3: resolve the class through the bound method.\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\ndef caller_name(skip=2):\n    """Get a name of a caller in the format module.class.method\n\n       `skip` specifies how many levels of stack to skip while getting caller\n       name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n       An empty string is returned if skipped levels exceed stack height\n    """\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n      return \'\'\n    parentframe = stack[start][0]\n\n    name = []\n    module = inspect.getmodule(parentframe)\n    # `modname` can be None when frame is executed directly in console\n    # TODO(techtonik): consider using __main__\n    if module:\n        name.append(module.__name__)\n    # detect classname\n    if \'self\' in parentframe.f_locals:\n        # I don\'t know any way to detect call from the object method\n        # XXX: there seems to be no way to detect static method call - it will\n        #      be just a function call\n        name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n    codename = parentframe.f_code.co_name\n    if codename != \'<module>\':  # top level usually\n        name.append( codename ) # function or a method\n\n    ## Avoid circular refs and frame leaks\n    #  https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n    del parentframe, stack\n\n    return ".".join(name)\n\ndef get_class_from_frame(fr):\n      import inspect\n      args, _, _, value_dict = inspect.getargvalues(fr)\n      # we check the first parameter for the frame function is\n      # named \'self\'\n      if len(args) and args[0] == \'self\':\n            # in that case, \'self\' will be referenced in value_dict\n            instance = value_dict.get(\'self\', None)\n            if instance:\n                  # return its class\n                  # isinstance(instance, Testing) # is the actual class instance.\n\n                  return getattr(instance, \'__class__\', None)\n      # return None otherwise\n      return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n    frame = inspect.currentframe()\n    code  = frame.f_code\n    globs = frame.f_globals\n    functype = type(lambda: 0)\n    funcs = []\n    for func in gc.get_referrers(code):\n        if type(func) is functype:\n            if getattr(func, "__code__", None) is code:\n                if getattr(func, "__globals__", None) is globs:\n                    funcs.append(func)\n                    if len(funcs) > 1:\n                        return None\n    return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # if self.dots or self.showAll:\n        #     self.stream.writeln()\n        self.printErrorList(\'ERROR\', self.errors)\n        self.printErrorList(\'FAIL\', self.failures)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        # super().addSuccess(test)\n        self.successes.append(test)\n        # super().addSuccess(test)\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': 
item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        show_progress_bar = True\n        nL = 80\n        if show_progress_bar:\n            tsecs = np.round( self.cc.terminate(), 2)\n            sys.stdout.flush()\n            ss = self.item_title_print\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n            #\n            #     if not hidden:\n            current = 1\n            possible = 1\n            # tsecs = 2\n            ss = "PASS" if current == possible else "*** FAILED"\n            if tsecs >= 0.1:\n                ss += " ("+ str(tsecs) + " seconds)"\n            print(ss)\n\n\n    def startTest(self, test):\n        # super().startTest(test)\n        self.testsRun += 1\n        # print("Starting the test...")\n        show_progress_bar = True\n        n = 1\n        j = 1\n        item_title = self.getDescription(test)\n        item_title = item_title.split("\\n")[0]\n        self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n        estimated_time = 10\n        nL = 80\n        #\n        if show_progress_bar:\n            self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n        else:\n            print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n        self._test = test\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            total_estimated_time = 2\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. See unitgrade.py>"\n\n            # q_title_print = "some printed title..."\n            cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n            self.cc = cc\n\n    def _restoreStdout(self): # Used when setting up the test.\n        if self._previousTestClass == None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            print(self.cc.title, end="")\n            # start = 10\n            # q_time = np.round(time.time() - start, 2)\n            nL = 80\n            print(" " * max(0, nL - len(self.cc.title)) + (\n                " (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))  # if q.name in report.payloads else "")\n            print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        from io import StringIO\n        stream = StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n    def magic(self):\n        s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n        # print(s)\n        foo(self)\n    magic.__doc__ = foo.__doc__\n    return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = self.cache_id() + ("cache", 
_make_key(args, kwargs, typed))\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n        return value\n    return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n    _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache.\n    _cache2 = None # User-written cache\n\n    @classmethod\n    def reset(cls):\n        cls._outcome = None\n        cls._cache = None\n        cls._cache2 = None\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        res = testMethod()\n        elapsed = time.time() - t\n        # if res == None:\n        #     res = {}\n        # res[\'time\'] = elapsed\n        sd = self.shortDescription()\n        self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd is None else sd)\n        # self._test_fun_output = res\n        self._get_outcome()[self.cache_id()] = res\n        self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n    # This is my base test class. So what is new about it?\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return (c,m)\n\n    def unique_cache_id(self):\n        k0 = self.cache_id()\n        key = ()\n        for i in itertools.count():\n            key = k0 + (i,)\n            if not self._cache2_contains(key):\n                break\n        return key\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self.cache_indexes = defaultdict(lambda: 0)\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache is None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 is None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def _cache2_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache2\n\n    def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n        key = self.unique_cache_id()\n        if not self._cache_contains(key):\n            print("Warning, framework missing key", key)\n\n        self.assertEqual(first, self._cache_get(key, first), msg)\n        self._cache_put(key, first)\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n    def _save_cache(self):\n        # get the class name (i.e. 
what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None: # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. What is going on?!")\n        cfile = self._cache_file()\n        print("Loading cache from", cfile)\n        if os.path.exists(cfile):\n            with open(cfile, \'rb\') as f:\n                data = pickle.load(f)\n                self.__class__._cache = data\n        else:\n            print("Warning! data file not found", cfile)\n\ndef hide(func):\n    return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n    return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this script does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/\' and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n    args = parser.parse_args()\n    if question is None and args.q is not None:\n        question = args.q\n        if "." in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err)\n\n\n    # try:  # For registering stats.\n    #     import unitgrade_private\n    #     import irlc.lectures\n    #     import xlwings\n    #     from openpyxl import Workbook\n    #     import pandas as pd\n    #     from collections import defaultdict\n    #     dd = defaultdict(lambda: [])\n    #     error_computed = []\n    #     for k1, (q, _) in enumerate(report.questions):\n    #         for k2, item in enumerate(q.items):\n    #             dd[\'question_index\'].append(k1)\n    #             dd[\'item_index\'].append(k2)\n    #             dd[\'question\'].append(q.name)\n    #             dd[\'item\'].append(item.name)\n    #             dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n    #             error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n    #\n    #     qstats = report.wdir + "/" + report.name + ".xlsx"\n    #\n    #     if os.path.isfile(qstats):\n    #         d_read = pd.read_excel(qstats).to_dict()\n    #     else:\n    #         d_read = dict()\n    #\n    #     for k in range(1000):\n    #         key = \'run_\'+str(k)\n    #         if key in d_read:\n    #             dd[key] = list(d_read[\'run_0\'].values())\n    #         else:\n    #             dd[key] = error_computed\n    #             break\n    #\n    #     workbook 
= Workbook()\n    #     worksheet = workbook.active\n    #     for col, key in enumerate(dd.keys()):\n    #         worksheet.cell(row=1, column=col+1).value = key\n    #         for row, item in enumerate(dd[key]):\n    #             worksheet.cell(row=row+2, column=col+1).value = item\n    #\n    #     workbook.save(qstats)\n    #     workbook.close()\n    #\n    # except ModuleNotFoundError as e:\n    #     s = 234\n    #     pass\n\n    if question is None:\n        print("Provisional evaluation")\n        print(tabulate(table_data))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h = [(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2]\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False):\n    now = datetime.now()\n    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n    b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    print(b + " v" + __version__)\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print("Started: " + dt_string)\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += " version " + report.version\n    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    nL = 80\n    t_start = time.time()\n    score = {}\n\n    # Use the sequential test loader instead. 
It preserves the order in which test methods are defined:\n    class SequentialTestLoader(unittest.TestLoader):\n        def getTestCaseNames(self, testCaseClass):\n            test_names = super().getTestCaseNames(testCaseClass)\n            testcase_methods = list(testCaseClass.__dict__.keys())\n            test_names.sort(key=testcase_methods.index)\n            return test_names\n    loader = SequentialTestLoader()\n    # loader = unittest.TestLoader()\n    # loader.suiteClass = MySuite\n\n    for n, (q, w) in enumerate(report.questions):\n        # q = q()\n        q_hidden = False\n        # q_hidden = issubclass(q.__class__, Hidden)\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        # print(suite)\n        qtitle = q.__name__\n        # qtitle = q.title if hasattr(q, "title") else q.id()\n        # q.title = qtitle\n        q_title_print = "Question %i: %s"%(n+1, qtitle)\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        q_ = {} # Gather score in this class.\n        # unittest.Te\n        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n        UTextResult.q_title_print = q_title_print # Hacky\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n        # for j, item in enumerate(q.items):\n        #     if qitem is not None and question is not None and j+1 != qitem:\n        #         continue\n        #\n        #     if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n        #         # if not item.question.has_called_init_:\n        #         start = time.time()\n        #\n        #         cc = None\n        #         if show_progress_bar:\n        #             total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself.  # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n        #         from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n        #         with eval(\'Capturing\')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.\n        #             try:\n        #                 for q2 in q_with_outstanding_init:\n        #                     q2.init()\n        #                     q2.has_called_init_ = True\n        #\n        #                 # item.question.init()  # Initialize the question. 
Useful for sharing resources.\n        #             except Exception as e:\n        #                 if not passall:\n        #                     if not silent:\n        #                         print(" ")\n        #                         print("="*30)\n        #                         print(f"When initializing question {q.title} the initialization code threw an error")\n        #                         print(e)\n        #                         print("The remaining parts of this question will likely fail.")\n        #                         print("="*30)\n        #\n        #         if show_progress_bar:\n        #             cc.terminate()\n        #             sys.stdout.flush()\n        #             print(q_title_print, end="")\n        #\n        #         q_time =np.round(  time.time()-start, 2)\n        #\n        #         print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n        #         print("=" * nL)\n        #         q_with_outstanding_init = None\n        #\n        #     # item.question = q # Set the parent question instance for later reference.\n        #     item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n        #\n        #     if show_progress_bar:\n        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n        #     else:\n        #         print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n        #     hidden = issubclass(item.__class__, Hidden)\n        #     # if not hidden:\n        #     #     print(ss, end="")\n        #     # sys.stdout.flush()\n        #     start = time.time()\n        #\n        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n        #     q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n        #     tsecs = np.round(time.time()-start, 2)\n        #     if show_progress_bar:\n        #         cc.terminate()\n        #         sys.stdout.flush()\n        #         print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n        #\n        #     if not hidden:\n        #         ss = "PASS" if current == possible else "*** FAILED"\n        #         if tsecs >= 0.1:\n        #             ss += " ("+ str(tsecs) + " seconds)"\n        #         print(ss)\n\n        # ws, possible, obtained = upack(q_)\n\n        possible = res.testsRun\n        obtained = possible - len(res.errors) - len(res.failures) # errors and failed assertions both cost the point\n\n\n        # possible = int(ws @ possible)\n        # obtained = int(ws @ obtained)\n        # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n        obtained = w * int(obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n        q.obtained = obtained\n        q.possible = possible\n\n        s1 = f"*** Question q{n+1}"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = 
now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    # for m in pack_imports:\n    # print(f"*** {m.__name__}")\n    f = m.__file__\n    # dn = os.path.dirname(f)\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n    if m.__class__.__name__ == \'module\' and False:\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        module_import = False\n\n    # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n    # top_package = os.path.dirname(top_package)\n    import zipfile\n    # import strea\n    # zipfile.ZipFile\n    import io\n    # file_like_object = io.BytesIO(my_zip_data)\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        # zip.write()\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n                    zip.write(fpath, v)\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    return resources, top_package\n\n    if f.endswith("__init__.py"):\n        for root, dirs, files in os.walk(os.path.dirname(f)):\n            for file in files:\n                if file.endswith(".py"):\n                    # print(file)\n                    # print()\n                    v = os.path.relpath(os.path.join(root, file), top_package)\n                    with open(os.path.join(root, file), \'r\') as ff:\n                        resources[v] = ff.read()\n    else:\n        v = os.path.relpath(f, top_package)\n        with open(f, \'r\') as ff:\n            resources[v] = ff.read()\n    return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n    n = 80\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n    print(" ")\n    print("="*n)\n    print("Final evaluation")\n    print(tabulate(table_data))\n    # also load the source code of missing files...\n\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    
sources = {}\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            # if len([k for k in nimp if k not in sources]) > 0:\n            print(f"*** {m.__name__}")\n            # sources = {**sources, **nimp}\n    results[\'sources\'] = sources\n\n    # json_str = json.dumps(results, indent=4)\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = "_v"+report.version if report.version is not None else ""\n\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.join(output_dir, token)\n    with open(token, \'wb\') as f:\n        pickle.dump(results, f)\n\n    print(" ")\n    print("To get credit for your results, please upload the single file: ")\n    print(">", token)\n    print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n    eval("exec")(report1_source, globals())\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    # report.set_payload(pl)\n    return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n    def test_add(self):\n        self.assertEqual(add(2,2), 4)\n        self.assertEqual(add(-100, 5), -95)\n\n    def test_reverse(self):\n        self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n    title = "CS 101 Report 1"\n    questions = [(Week1, 10)]  # Include a single question for 10 credits.\n    pack_imports = [cs101]'
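For readability, `source_instantiate` above is equivalent to the following sketch; the `eval("exec")` spelling in the generated file only exists to survive the minification step, and `scope` here is a local name rather than framework API:

import pickle

def source_instantiate_sketch(name, source, payload_hex):
    # Execute the embedded module source, then construct the named
    # Report subclass with the instructor-computed payload attached.
    scope = {}
    exec(source, scope)
    payload = pickle.loads(bytes.fromhex(payload_hex))
    return scope[name](payload=payload, strict=True)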
 report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e'
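The `report1_payload` hex string above is itself just a pickled dictionary; decoding it shows the placeholder that `_setup_answers` records when a question class has no `_save_cache`:

import pickle

payload = pickle.loads(bytes.fromhex(report1_payload))
print(payload)
# {'Week1': {'no cache see _setup_answers in unitgrade2.py': True}}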
 name="Report1"
 
 report = source_instantiate(name, report1_source, report1_payload)
 output_dir = os.path.dirname(__file__)
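Assuming all of Week1's tests pass, the token path assembled by `gather_upload_to_campusnet` for this report works out as follows (a sketch mirroring the format string used above):

import os

payload_out_base = "Report1" + "_handin"  # report.__class__.__name__ + "_handin"
obtain, possible, vstring = 10, 10, ""    # vstring becomes "_v<version>" when Report.version is set
token = "%s_%i_of_%i%s.token" % (payload_out_base, obtain, possible, vstring)
print(os.path.join(output_dir, token))    # .../Report1_handin_10_of_10.token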
-gather_upload_to_campusnet(report, output_dir)
+gather_upload_to_campusnet(report, output_dir)
\ No newline at end of file
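Stepping back, the mechanism that makes these generated graders work is the write-once cache behind `UTestCase.assertEqualC`: the instructor run records every asserted value under a `(class, method, index)` key and ships the pickled store next to the tests, and the student run then compares fresh values against the stored ones. A self-contained sketch of that idea (names are illustrative, not framework API):

class TinyCache:
    """Illustrative stand-in for the UTestCase._cache / _cache2 pair."""
    def __init__(self):
        self.store = {}  # instructor-produced dict, shipped as a .pkl next to the tests

    def assert_equal_c(self, key, value):
        expected = self.store.get(key, value)  # unknown key: fall back to the fresh value
        assert value == expected, f"{key}: {value!r} != {expected!r}"
        self.store[key] = value  # record for the next run

cache = TinyCache()
cache.assert_equal_c(("Week1", "test_add", 0), 4)  # instructor run: records 4
cache.assert_equal_c(("Week1", "test_add", 0), 4)  # student run: compares against the record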
diff --git a/unitgrade_private2/__pycache__/__init__.cpython-39.pyc b/unitgrade_private2/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bf2f7227e54c7b2301a6dc324a1119b37119103
GIT binary patch
literal 944
zcmYe~<>g{vU|@)L(@Yd(W?*;>;vi#Y1_lNP1_p*=Jq8Ab6owSW9EK<e%^1a)!kNOH
z!qUPR#gxLD!q&nN#hk*P!qLJI#gf7p%%I8j5@eEJNHWL}5EF!185kHi7#J9wL55Z_
zFff!bWHByas$pEnSj!A%GuJRLWGq%HVOYQdVlgt*Fcle;u%<B9Fg7zWGL*2@Fs3jy
zGxaOfvXroAanvx>uz;M=%go3S&XB^u!ob4N%*@D;Ct%1>oM6Df2!@diU~|e!SW{S=
z85tQ;7=jrz+5D;m!izFXGV{_Ek`t3NQuXxoZZVZ7-D1h9%1vC!c#AtZKewPLwYWGl
zKd*|_B|k4!QIqo)Q-1L+mV(5Rj9cs}nMHYtxv96<QY$ixONwu?=4Pj47TsbkD9X$$
zxy6!SkeYXkA7)5=L1uDxPU<a|l+xS+P1aj%`AJ!+$tAZq(=u~X<H6>zm!%dZ<rk;k
zVoJ%o#hA8|p-6;*f#H{{enx(7s(wjnMykF`esXDUYF<gPeraB2NqSLYN@{#TQD#|U
zNh+p@k$!x9W?p7Ve7s&k<t=WoC*#YDGD}ifK;h2^iV;R0Mjl2kMm|O!Mm@$NF$M;P
zWJZuU6oc5H2n0tM4<f=C7J}lK(NB}Ph>d}Pp@<zsa56A3XfhTFfY>0HgPmU_%D}*I
ziw9ykB%nYR7c+ruU@8(vw-F=`Pa=LOHWs;+FxN1oFg7#w%hfWLuw=1<5<?1eFH<-J
z6H?>{GZ-=yTOmh&9t$YSL0FT;?-o;0QW3~gMWA5NWJB{o5y+cGydYPz<m4x&Xfi>3
zB#gydTo7*+r6#6;q>DigXJ7&asUCBY6gW6SG+7}QfJ_7jFxbyvSx5k(1r>))ZhlH>
UPO2R!?7=2-Fmf<)adEH$04poUJpcdz

literal 0
HcmV?d00001

diff --git a/unitgrade_private2/__pycache__/deployment.cpython-39.pyc b/unitgrade_private2/__pycache__/deployment.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1183907f15d5618aa6e7584380d3fad693ad2d2
GIT binary patch
literal 1425
zcmYe~<>g{vU|@)L(@gxu!octt#6iZ)3=9ko3=9m#b_@&*DGVu$ISf%Cnkk1dmnn)V
zmpO`=5yWTAVTob^%fo2aDAp9#6y_9`7RD&H6vki%O}3XHll?TAZi(ckmSp6o6o+S)
zWVoaz=NBcG<QLsy$;eDeO=bcqf?_@f1_llW1_qETihCFt7-|@r85S@sWT<5-VXR?F
zVQgmVm#Jm0VP3$rkYNGyLWWwFaE25H76ulEW@bi)JOM+7Vlx8<Mlg(IU}Q*V=wzs2
z$YOC}hz*Hhs%5QZD`72Rt6?o+Z)R#{tYxoZcVURNsAVqUSio7sypS=5sg|RbvxKXL
zqnWXWQ=FletAnA0wS>EdGlfZlp@s`Yf^?cQbTSmC6nd4gwlkzLf_=?V!c)W2%;>_<
z%-GCW%aX#nfVYNaA!98|4Py#JHdE1^8m1J66t-3-Nrn`5afVvP622_{1p+mU3mJ?4
zf!TsJOf`%PnTr0E2&Hf|Gd3}nFxD`pa5gjbOM(3?3^I?A0ma8ch71!Ji?xvbT*Hth
zQp;V#y+AaDYat^eLk)KdLk;%=F%S=IJC`^E*gkQP-7q(aGt_dINGy=7;a<pC%Tpp%
z!&AeMCEd(a%Ui=(!<)tw%%I8bcZ;#;B_jg^Lo$ef#u+%WSQr=>gh9zhhJk@0ouP&y
zRwRb0ma&$phOvX8jIoHNhG`;GAxkjBN`@jf1_lOA=37j92DccqZ!zYqWQ^j;$;?YF
zjxWiNFG|e?EBNKEpOK%Ns$Wu?k*e>KpIn-onpaY+Uz(R$l3tXUk{Vx7lv$Qol8Px}
zq@R*nkdt2t(x6vRd5g;?C$qRDIX}0+PMm>(;WNnDRhmVqx%p+O@fn#ZDXDq!(B!FS
zlarsEm{V-0htPA2k?ZB(|NsA2@x<rlLzL?kR949aXXa++Bo-;8W#*(R6eN~p6f0z=
zq~?`mre&t4DBfZ$zs0DhDRYZ89;DzFOMXFW-Yu4*)WnoqT*U=BnI&K!-{LGRO)W0T
z%+D*n#R76>5hynlfs+3%_RPHEg4E=aTYTxMCB^xrMaiiUJ<K_&dAC@L67$kiZ*ivP
zmFA`vC6=V#V#&(S%)7;uUwn%N<eXb<sTG;UCB?TmiW75F!H(c`tw>HS0NHknwY(^^
zB=r_oe0*+xN@-4NeEcnzw9LGeTdYN3iX$npI5jUZH}w{0W^O@#QAti_(k)&H6RPDF
zdwOa~Vo6ESEspqjkka^gO}<;)L8&0+&N+$2#kbh=OH05uvw%s)v|DTtkKAGmyu}zF
z#gtQgi?R3?YjH_pQOPal)V!1^)?AQLx7Z-giW2kyM-Y;s;M7qBD#VJo85kIDiDHdO
zP$5!m0E%iRE=C?kEhZKwK1M!94n{sk9!5DPDMl_P4n_ecDMk*kEE^*iqZShv6AxpN
zIVg29`f0KkNq~)qr#K@$1QQWWMRE)b3`H6s0;Ch-I#3FSu)t2_uz~o-4wP$(K~Cpj
K6ky~4L1qBdhlc9_

literal 0
HcmV?d00001

diff --git a/unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc b/unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8bd23d3e204c918fa1d3cc8d5f223591f47ac4f
GIT binary patch
literal 3217
zcmYe~<>g{vU|={Apq2PkoPpsnh=Yuo85kHG7#J9e{TLV+QW#Pga~Pr^G-DJan9mf&
z1g4pzn87ql6bqPUjba7UY*B0}OexGQ3{mVUY$+@$tSyXD94U;!44UjOK{oj%vw$cl
z7Gz*x;ACK6a0c1AjFEw%gkb^0LWWwF62=<F6vk$zeyLj45~c;rHLMF6YuUmXQkYm6
zSQwg_85!~f3>k`z3>X-}Fp`0hp@uDmS&|`zMUtVGy@X`}YYlq}t0coh#(tMtjuN&7
z>@^$<8T+MbIcqo<aMUm?WMpK(Fs}*OJPg?d$g(LcDNH$>wOln!wcI6KDQq?DDeRIA
zvzg{HH8V0YlyIeR)Uc;;qKI%oWw>j&k<{_jFu`~=3@N<g3^mN+47EHZTq%4tJTRFO
z?gczG+%-H4nHU*Lc(eEx@YgUeWLzLn!kZ<ykg=AxhBt*Ng};?a5+W;9!?chwg(*b<
zB$Cc3&QQynBDg@fhIb)jEnkXI4WBqeEnf*&if|1d%m#?vV6#O)W{WeVh=?=P@<U>z
zhDCy*R-lHjhChudm_bvtY8jV;f`WooL1Iy2u7XQ`a&~G_T4qjad`^CHVo7Fxo)xlu
zMt*Tgd`WIWd`e~!ibQcqX-aBdNqk9uc4}Tc$N&`Spwxo=q7vtv#NuKU@yxv9lA_Y&
zlKi6h^rFO+)cE4$qRfI4sG6eGlF}lm18y-DCHdWAFU~J5N=_{<Vq{=oxW!(TSpc%C
zN(Cw$tzHDSD848)C$S{64B-p)Sle3?xFo9t;QI3tb5lWTSwM^`3BUYuh0HvK6mYP_
zXQbv7q!tzH6;xJnq(fY4TP1<R54OJ;^{Ygn%HmT}i%T-|zyWIe^8f$;|6l(7|Ns9j
zM!jF$1(hWk`FX|)y15EfYz39c`MI{gIP|sjz(J`gdW)qbGdJ}XOMXFW-YvF*%;fBx
z)LSe$`H3mFm@@Nku{%|kq!xSn7lCr)E%vC)0=LYZ)LUGs6(vQ9$t8(7Ik(s|^NI^n
zlS^)~78GUXmE2-0&L}O(%(=yqoL^9Riz&bO7E3{5NyaUXy!@hEFpIUgASbir7ISfG
z!7YxY#Nt#?Xxw7S%FoQZ#h#K`1mbE6-r_+{4!49b)4(lp9D#F79unQ4*h54XQXt)8
zEiOqcD!Ij&c8k3zwYW5=q__xFrrlzyO1i~dm03_E3Qhsg@Qo6NGLQrM7GugSR&anu
z@qq&qo~CcHr{*LU6sM-FWGK>OU|{&=t)G#fo2p+@nvtsSlAm0fo0?ZrtY4a!Spsoq
zK~ZK|Vo55dh><=<+PEc&84&SBrFnU&MSP&r(Fjyza&a*6FlsRJFbOd7FuE~uu&^)+
zF$ytrF!C|-G4e3-G4Zg8Fe))|adR;8aZ7OWFcldvFfb&ug7iYMByy44!2~LDvltgJ
zfeOT80WXFo#uDZQ%nKQ6SxZ=In4m>4TM6p|wi>pDjJ50-g`yL3p;*HXFM>Hr7{KN-
zE@UjqD`8*2QNsu-a!WWDaMf_Y3uaDm!Nv(Ln7L}W7Vtt;Vc4<)*%l1hW5}}Lf|;+D
zyM{T1sg$wkM-4ZmuuNe}VFMM1DeR!)HIJo+rIx3LF@-~%p@sugP>VA_*<6wgpaK)@
zegROC2@(hM1Z#K}f(lcX67~f`H9R#epyC*8pKvW7L<N`>0EOm4##(-e$sqkT94QcU
z1yZ;dh(N^C7{RJVA#w{DQ+QH%K`s<$0E>fdOJfvgSjbo_n8H^h2=jqBL#-fKg%~Kb
z7Ko?tr*JG}sufBRs1cgYkRn*aKbs*%s77!$!(6sn;S^!8TCkgDGt6bG6{!)K&5$Be
zBRrd7E_1DDiA0JhxMGs10n;^n3nXhq7l<wdSCNnqf|?)z@;%5mj0`1G3#72fiGf`!
zRwL%Z5GznCULuhqUL)Sj#K<s#sW1YPvT9f*7-}U-BvK@5BtSAXVxTHJm_bvr>Mpp_
z2+7DSRw&8H%uBCSD9<m-F1F%=i03OLm1gFoD3oNRDrDv+rl;yC6qP~?dPshPD$vy}
zNlaIOl#sem89lfmMWuOQBN7V=Ao{_kLzRIfb;}fzt@J~SQ;Ukx^R|(Jk)b|{!TKe+
z1y=eXvrvQ;>=g1#OA1O$^m6jkAyyYLF)%Rrfy*OsMFcK;i$KkvDo#i}rdLo|#LmFL
zP{alzK)JRE)LwfDs<w(aKw{jWVuvxNiVxMfRcsI^{9;t7Vopv`sA5%6P*YQ=lCV<<
zD$UEw%u9z>MheOKxw(mXDMdUC3=CD$!Qgrk5||1FMfoYE$*Cy{;8L%O-xX5WDS#>x
zh0J12=^{|sQ6vQ_LD^C(GK))!i+Dkf0+mli{2-P9h!6)WD9SA<N=+>SweX6BK*FG!
zrbrmX5&;p=f;hDVRG1ejfP~~ggak-8dr3w~etu4IkvvF91}v1AR1B_V*o#sNauSnM
zi^Lch7^3)7D^in7OH$*Z9=pYoo|B)XDH$b<rBPC(2r^Cyq>me^ZBe8Q5>^EfY9K-#
zM1ZP|A`K8r6GVWVTcicjDFQ2}A!R(ILN780NpXN;1ysvL@qt=95CvfW>42p{f*?Oc
zv7{vD=SHz6gDIA@<ow)QY~WBYzQtHlqzkeTRKb92nIe6V_eDWQrxt-~1f<qdJg8>y
z0F~^#0*tV_frpU`RzEOuFiJ47Fv&2=F!C^Su!=B=Fqtt5Fo`gVF$yvAF>)|+F!3=<
zFv>B4Y7ibqF2*8T1_lOACO<z-PDqS`qudJQAW*bI;u{>3;D`m)E?_MXm!i3u!zMRB
ar8Fni4iuKfptQjOvXTW%GIB6+1OfnSNKQBa

literal 0
HcmV?d00001

diff --git a/unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc b/unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9bdd98457f31c4fc351bf81ea01a6d12fb3d531
GIT binary patch
literal 4665
zcmYe~<>g{vU|@)L(@gv<#=!6x#6iYv3=9ko3=9m#X$%YuDGVu$ISjdsQH;4vQA~^=
zK2r{J6mtqg3R4bC6bqPUjba7UY*B0}3@OYx?719K98k5KQJg6(QCumkDQqnaQQRr)
zDI6^fQ9LQUDcmVMEsRmTDNMl(ntU%oF3@DU#g&|xoRJz|l$w}wiyOi&FUl-Q_0wd$
zC6bYul9HMipPpEfky;dAT9A{Um{RqHtGYTPH8G{OmW!(>wIIK!#4x@%zqBYhRl!!F
zx*Ec)t%b`KBvyj-DcCBgS64$hwYBP8d5O8Hwo292K@c-)Yn5Q;DcCANOo`9TD=taQ
zE6GeON!0+U*HJ*V5y6KVtjU#MT2fG25}%S;q+qL%U#wS<SdyWal34`Ot`Q%fmYI_p
zAFs)U;-~nM{P^U=+=9~Lywnm6h!r{t2$M9qk{Lna1;v~U3=FIc3=GboNSgqPG{zLB
z9EMuPOom#f6cC%SmN|tng*k^Qm${aOkpV2mQp;MylERY0+QQJxSj$$!kj3i45UUr%
zRLfq=QN!NMuz+nL10zEXdlq{Qdpt)bLk&|6dlrWcLkVXJTMc_N6C*>RRS9Pea~5L_
z8z@?PnVK0H8A`Znm}(fZxKlV&xO$lx8NkuYU6sP6prD}OpHz~VnWs=(Qks&QSE7)Z
zS6rT2RIHFxsgPQcnp|3vnU}6ml98%Vl3HA%keLT&L%gq$n3n?L<|z~>=B9#Jxe6uu
zAX*_Mvp8E1Y<v+YR6r?(J0&$Ou{5V7B{R7s85&A(3Zy6)6jUq>3=HWE!3--I{Z=v*
zu`@6r5x)ZTGxBp&^-D@KQuST(lS^|`^Gb^KOY<^I(u)#PQsWDXGRqQ6QZYr0^r6W#
zxhOTUBo!RP#d-ymw>WHa5_6MM67A#|7#Kc-oKq!VoLW*^5DyIpJ)4~T<iwm}J3W}n
zDp5Oy;M5XuxRe$s6qcqImt^MW-D1g3tt{4Lyu}Vm_^Cz3ZXkA9YEe>tajGWIEtZnZ
z+|*kfC5cI;If*5yx44t@b8=FXL0XH8Kq=)Gb8%(yEl!wu#kaWQixbOI<G~s37H52X
za!z7#aeVwO7Em(1#ad95nO9Q8%)r2KOEft@x1h8nH3jBsum?3cZ?Qols)!GyiUXSd
zi;MU{!3wgoNDw5!R8)M6vG5jSJW_abfFyWPf<hD&KT@DDV&Pz9VdP-s0b@QUE+!5}
zE=C@HA;uzE1_lO{e87a94|Et97-|?6Fr+XpWUOVZVMt-9VQgiRWT<5Tvl$jJrZ6pp
ziv%-hGW*@)h>y=r%*>0Azs1O<$#jb)Ei*6W7HbiR(qy{D2k{`Rbh*WpQi1LXuHurS
z%mT0>79byhe8vEBFQX7+kqQF?Lx@{4D4l^k2EsB73=E*$3kum{C1wVO5{48;P%f%r
zXl7WzxR9ZiJ%uTSA&04!qlRMva|-i9P!3~AVF9TrVOhXh!?2LCma~MdhBJk=nW<l*
zmaB$q0sBIRTJCU$6b2Rs7KUbKMut2ALx$ow0|rJgjAUSBDB-B#Zf30Iu3<}I$Yv^P
zNMTE1Z(*oqt6_tPmvAoNs$pNqSj&^bl){n2oy$|p3(CtW3_0AjeCZ4+oQoK1`BS)3
zxLX)H8QK}r7*lvscw0DX1v(gNxNG>EnQFOf1ZsG*cuV-Qc+;7{B7!MQDSSEHx%{<4
zj0`mbHGEmTHGEmzS-dcHH9}du>8v2NDg5FLHQeG1wcI5PB?1csYj_rd!=;3wM5u<R
z8NwD`AhM8QfoO@?LdFH+H4Ir2C6XZ5*YGTmTF6i$S;LvakRkwz^AtgGhGxbZ!4#o6
ztP2@yg^^^0#TjZvN*Gc^YD7{*L19(GRwI%kmLi@a(aYQ~1rAT?8evelqlBoCA;Sd5
z;!Vh5TEjenspv`#(*(w%GbxgJOes>eqUj8^Vl^xi7>l0Nh%Jywkp`KQB2yzen;}Iu
zMRtK~jo3oQ*$gRiH6kDp5IL8lR=k9vMjRZ&atq{Z#KC$%sX(DtB84eMK1U%}p;iJK
z8?cn208#~ZJ18B<fZff$kYR!1LWWvN43{$%=ODX0MX^>YMX5$goFPR?oFSdDMlyvV
zMHv)w;taKtDf|m$Ya|yk)=GlS7i(sMhl?c0RxGBULN>jIaRPIZ6iWJ40s9V-e&w>1
z7AV(nE@Xtqn5sBKt#FA-idu~@DDD^;Qq*guQy797G&TIHM3KrwSlOwk2QK~7XUznG
zo%U4{0g1`kpb7<6+kmRtVm&>*TZ~00#RjN80AYBYP{R<+Py}iPfwQ+J<1NNwFbhI}
z@^ld>(*}Y{Hc(TlN)u9crGqO5kTvm;q5`u(*JQjUgs4L@QgaGYi;9gjS*v8>qDFdf
zW;{$m6>nNmey)O^LS`<gfm0;_=EBt(>0yW{R0%@VDj-xPg9=Dcu>-;&HmFz@29;9^
z;6}p&NJeH#VQ%550T(|m49%e8lK~VWnk;@r^5{h_DBM6H1vW(uT%Xo3#L9Itlt9d4
zNMUT@=wJky)67`R(!ns1sgNa@L6Zq322k7rD)rMDY8YaLY8g8i7BDPiSio4rxR4Q|
z6C4McOt)CUCG9P);*!LolH&5rk_=6zTTFTew-{4bGTvg$0jnzl#nmk?o1Dzz5>T~e
zmk5tCPl8calnV*KV&qta8Kj3WQ<L!)b7EelCS#En0|UcK##=0@70Ee8p!x@Fe~}&o
z0|UZhJIX8uRZCg<nR&OkiVJcwOL8*vQj0a2ia<eMWC-#dsCp?f0I|TH=Y)GB0Tj_J
zpr~h*VXV?3-We##29#t#i60!{ao}WA%T&YQ!Vv3L%Ur@x!<@pD!kog=%iOP0%TmKs
z!vd<sQdq?qK-iq2mI=AEDC96?D0V?ED+)O}K<!M%W~N%E8c@f8&F>aNktzcN!!Op-
zk~Ce5m5jGIQuC7YQ!?|?!O0O6Ao&HUdAC?VeFja|DAqJkbVC9I6fCz`)4+`w#<Z1;
zMJAwt0S6H{bXaphU4ax($nbze2GrCLU^HRmVysdl7(!sp(3V$mW_}*nJum<M|NlRV
zJw78bFC{ZilQoJR+M&3`2I)lHVu!YkqBu+Pvs3fp^Gi!^v49$KRh;qQB1EsCvP#-c
zA-pIP-0*{yU&YBqnFS>ZCHYm-8YLN-#R^b?+{8+Sq*R6CjKreU6wO<V<wa&7Uz#&8
zFjO(?`YRNff>`Xjx=B@;1x8ie1(ms(d6{XMsYMFE7!|5y6m)fqQVVhtlT$%$T-}`1
zy!4U`TO$Jn-FyX2##>wn_0Yogq<z)waK|7&Pd|4n1>gLX%(TkPymTFflvGeA%gg~4
zy7@&4`Ji4}d1i5{LP=t7L24057?e>JkQ}e0kXfQ$tdNpelBkeh1a8h3r52awfIY4U
z4j%<jfvQlRnUe$ZWoBL(s12N+SdyBekXWpcsF0kIng~{&SW=RjTTr4`#jLKb{__9-
z|NpDRLqLl2(iI94le1G(z(#?pfG9RdNLR5#^0>mwR}2gcnyN)&pr$BaJS6I&RSTp=
z%UYb1np$v+Ju|PkAT_z<7JGVX3CLwdmY~440ui7F-YvF*%;fBx)LX16rMU&gx400+
zZ;?94a1L0XuLzVEi|j!f!7XFX^wg5#%&OG<v|G$Msd=~9vNH3s5{+(gyXKW;7Uk!G
zA}5L)RPe`x%D%kxTP&bp1GT&JQc{aRjaN`Q2Q}J@QVWW1F=wP!++qcHo^P=w78IoB
zr4$)~T)>oHe2cBPvbZEQx5y1-y)j6X1=KOU#gUX)oC-1qREni0Ysy4%Lz1sEs6%jz
z9nwupy~UZIlvY}t4C-94fm2gu6bmSMMRDZi=jDLnBT5L8+~W(1@{?1Gi{sNu^NO56
z)`2?+Olf&VHXxo70|P@84<yha{sM<nkv~WhT%y^5RIn9Pg0cuCyujI-4bmTuV$M%0
zzQvkaQBst6i#a(r1?;CN*5bs{<kVYC<%zc#6Css>EyzT0wik!QFF3P-yD!lC8a(Pz
z1}+FVpgj!^W-dk^Mm0tbW)4OUCJtsUCQwg=gN=iQhn<T_h>?d;gNcukhf#=;jggH}
zh>__J2de-h52FC1784sI9}^3+0HYEksLulF!+=cFVPs*H0{03)YPi6%5{x{IMOmOk
z$QBY365{5k$p;AnP~8abc@=@$Cq=#>Pk4if7*MPsH71}PQX@#Z0wt-UP>?!MHwoPL
v02>bp3UCPsCcxprVUwGmQks)$2dd7CMHm<u1Q<D>kcW|jiG_zlnS%oW%CF@s

literal 0
HcmV?d00001

diff --git a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a413eb7b3090c5d5d4bbccbf251cfec013c941c
GIT binary patch
literal 3274
zcmYe~<>g{vU|@)L(@Z=m$iVOz#6iZ)3=9ko3=9m#O$-bSDGVu$ISjdsQH+crHd78$
zE>jc}n9ZESoXZ@=3}&<Butc$>Fr={Lutu?hX|^afFwGvt4yHMxIKVVaoFR%cg*An(
zg&~S7g*}C%g&~SNg*SyWg{y@ziYJ9Tg{OrfiZ_KNm_d{8CCGJ}jJNnx%Mx=+6H8L#
zi&6{ni%K*ZZ*i0)CY9zSmZXB%DTyViC7HRYe#uNA9Z(GN4hI7RgEPoWb_@&*B@88u
zDU8ibHH;}t%}o7LwM;ck3z%vc7BVt2gfpZturRPNG&3_Y<Ovuu6dM>YFoIzu1IVMn
z44TY-w^;HEQuA&xm6vET-(pWsElDgXDZ0g+RAqFFwY(^^Bvq3+iX*EyKQF$xr05oF
zNq%-}-Yv$ol?+8Z3=9mv0`)WUb5r$8N;6XRUGkGlb5rw5iuFtLGE35n5>ry+3yLz!
z5=&AsMU3<_GE-7g^WxJJOEOZ6;!6v1@)J|^3My~0Cslzh2AN;X0`fSc5MvP^0|P@c
zBZvpZf(#4{oUovH#mK;r$xzE!!vG3^EQVU<8m279ET$C3Y^EZk6s97h66P$H1*|np
zSquvqYgtm5YgzLYO4w?cvX~aIr?4z!OktH|XlBgf$l?UCYFSd4YFSfQQy6l%YT3Xv
zcP)Dj`vRVY47D6JYzug6I8xXaGWD0#a+WYJ;HzO-$mqfl8xg}?%T>!=%TvSS!Vv3G
z%Ui>{fIo$OA!E@5u>ArxTs6E4nQHmKJV7uC6$7(bLFO!Es^zcYTp(1#SHr)MiIJgj
zLSYj|RDM8?${LOZ!V4K{7*jZG7;0E*7*jZH7;0E+7*n`x7-|@6Skjn+8Di^D+?IoI
z+XTiU9kA^v?x|q`xuZk`ECY383Qsdrzf7$_jlcp?kbO1$HmHeA$dF+IV{s3%+a@p;
z>P%oPl*@Ain<QAn3^TcgnMm{QAe&di7|fu_>vxMSH$SB`C)F+F7NcGfC|%rQEU#h)
zr=P0Kg0#$>)LYyo`33O>iOJcC>8ZDPA?o8Zb3wVSiaS0&GcU6wJ|3ilu}D+w7Ds$M
zNLPIPEzbD(<ebFf;`n%wU|wQwYJB`Hru^btECq=r8MoL|GK)Zhx47b=rp3qKVl6Jn
z$t(e>2dM!G#Df?`LJSNHx0o{XZ?QX7mZTPY`rl%Y$}DgLIgh10F(>;LM`~V5ad~D*
z#x0ht{LH*t>_w?LV4a*GogC?@C7^O5wTKnOWhqKcOwkm)#hjU25XD)PTAW{6l$=_8
zi!rx|n}LC$NEk$jfC!MiMG6cI3{hNFnFaAlrD<uYMYou%G7E086y@ia++s<|EGoXm
z3i5LCEfx?J#hM1P_!eVX6jNFnID10~UXay%(0rN+4!>dnPyyrw%8HCEj4aG-ECP&t
zjBHFYj4Ui1%p6P{jC@QYj2w(yjC_m=jCxEwj2s}y#K6eK$ic|NEWs$oXvW0H$it|^
zSY!yw+Q_9GC~Lz@xigFm3@MDYOeG983@J=t$ehB`%NoN}%UsJ+!kEI^%*Y5T1=(tt
zL>QoKb_iX<RKwED2xc?aFl4bbGp2B4vlMldFs5*VL_uOM46y>WtR;*oTs5p&tj(Yd
z7gxfV!VQsUVaQ?wsWN9^Vu)wUWT<5;0hz=GF=+y05y%BRAhjjzH4Is7&7fi+B!;P$
zy_U6vqlUGaF@~v@qn5LTGmC2hcL~n|-WrB1zJ-i6oC}$1xoWs-I8t~)nKg^Qh9iZ~
zhM|U|hOLI(2IL?98rCfS*$i`;K&BV2t6{5QP7wfQ=oE%hh6#*CawVJ#1Qvn|y)3>g
z{uDuRhFb0#<`f}Gh8U(=o?4Jif+-B8j1w4(d{TsK7_tP#8S<D?L~8j`L~FQfcxrfS
z_|jO!7;5>!t`n+Z1Qj^7{3XIQ{3&9PtX?9rK(vOrMgUY0p%g-ptUd=hYa+#kILOW5
zFy#k@XbEGA1V|)>A(%l^((h#e0|SFwh$hP|zT%Ah^7xF@oPzkYoW%57ykLH6ML}wE
zNoop|o1C9pPzvI*6=&w8=9T<nRQSbcTP5I@nU|QO0If$e^Yg&PK$Wpmr2?cf$;?Yv
zD9K1w&;wUm3ZOKuqfnV&s!*0%l$llumIDbETm53RtMYOzN>xZMN=+<DP0`QIPsvQn
zOifY9%uC5E%S<Uv%*m-#NU8*D&&*RuOfO1J1=V#5pscIl5)xXhkXn(Lt58&$lUiIQ
z<C&M7Q<?&D0N4VB%si-@^z`&@NkD4+_@dOD#FEUi)cBnI<iwK9{JdK%pmL>(MN3Od
z;TAhAbF;^1B<7`L=7B2TlEj=Srued33{}Fa#qp|{@%d@-s+p?A5HH?hDo?7i56M?Z
zPb~p?B_*>&AuYcM<me)WqSWHjoRVT4g@T;a#Nt$_d%%HEoSB!NlL`(XD}^fckbH&Y
z#N2|?;=I%nu>bN)OB53GDit810&#1xo~Bq4DAV3zEhx&&D=7jsO>Qygq~_fcLIfJL
zfGjQ&1C=2>pu!j?bc-`JuQWHcD6u3JTmlqHg32u^5TOQA%$}K73`z#zMhPesi^M^4
z5+FIY^wg5%@|0WbWvNBQpqN($Nr8(<wt~#$?3~nFEGebA1)74l*dT!&#g$)LQczkF
zpORU0ixFJRf`j80S4m=0PHKEgVo73=7RVGEkOhp{Q7n0xxdo6C)e2OO3KS$(f|@At
z`K2ZCNr}a&x7hNNN)j{kZgCXk7Z+zH<)q$XFDou7%FIhI0yRn@MKY)rgH()4L5opH
z3XLzxkB0}f8>mS2fEKA@pkfk)nfai_C<mhe6APmNBL|}pBM&1;o{N!#Nr;Jq1w`{O
z@i0m;feK+BMja+lVavr>6vDv3kPM0!kf9(93Ly{%Hy=&F&BqkRX2u%E62@i*a7JZX
zz+3`uCoN=L$XLq^Ds-C}YnW4*QkZ+0YFTPPZCDn+TP&#+smURlOt;vfp>m52<n!bb
zO}1Mspd5LNJv}EsDKV$G$O{ynte}u9zQvjZsswMbrxoSrW~5eVvKCo`WceU*WEc-g
z@<m}FL8gM7A|FtMpoO3iL{WTZUU5lcUP)$RNooZs<Ukb$1ET;4G8M%$FfeE``f2jr
zl7+WLjr8D5aEX&zRBQxJ!QiA@1Zrs(f#Tp6OIC4y9wa$}42AfB3$ED++!lqTSx^Xr
lErUccno~J!AdL<?Q2V49l(cymK@b!@aLmNP!ePc`1OVqzT~q)7

literal 0
HcmV?d00001

diff --git a/unitgrade_private2/docker_helpers.py b/unitgrade_private2/docker_helpers.py
index a91e4ba..a16d66f 100644
--- a/unitgrade_private2/docker_helpers.py
+++ b/unitgrade_private2/docker_helpers.py
@@ -1,6 +1,5 @@
-
 # from cs202courseware.ug2report1 import Report1
-import thtools
+# import thtools
 import pickle
 import os
 import glob
@@ -12,6 +11,68 @@ import time
 import zipfile
 import io
 
+def student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade_script, grade_file_relative_destination):
+    """
+    :param Dockerfile_location:
+    :param host_tmp_dir:
+    :param student_token_file:
+    :param ReportClass:
+    :param instructor_grade_script:
+    :return:
+    """
+    # assert os.path.exists(Dockerfile_location)
+    start = time.time()
+
+    with open(student_token_file, 'rb') as f:
+        results = pickle.load(f)
+    sources = results['sources'][0]
+
+    with io.BytesIO(sources['zipfile']) as zb:
+        with zipfile.ZipFile(zb) as zf:
+            zf.extractall(host_tmp_dir)
+    # The zip file has been extracted; now stage the instructor's (trusted) grade script
+    # next to the student sources.
+    gscript = instructor_grade_script
+    print(f"{sources['report_relative_location']=}")
+    print(f"{sources['name']=}")
+    # student_grade_script = host_tmp_dir + "/" + sources['name'] + "/" + sources['report_relative_location']
+    # instructor_grade_script = os.path.dirname(student_grade_script) + "/" + os.path.basename(gscript)
+    print("Now in docker_helpers.py")
+    print(f'{gscript=}')
+    print(f'{instructor_grade_script=}')
+    gscript_destination = host_tmp_dir + "/" + grade_file_relative_destination
+    print(f'{gscript_destination=}')
+
+    shutil.copy(gscript, gscript_destination)
+
+    # Build the dotted module path of the staged grade script relative to host_tmp_dir,
+    # e.g. "cs103/report3_grade.py" becomes "cs103.report3_grade".
+    d = os.path.normpath(grade_file_relative_destination).split(os.sep)
+    d = d[:-1] + [os.path.basename(instructor_grade_script)[:-3]]
+    pycom = ".".join(d)
+
+    """
+    docker run -v c:/Users/tuhe/Documents/2021/python-docker/tmp:/app python-docker python3 -m cs202courseware.ug2report1_grade
+    """
+    # dockname = os.path.basename(os.path.dirname(Dockerfile_location))
+
+    # tmp_grade_file = sources['name'] + "/" + sources['report_relative_location']
+    # print(f'{tmp_grade_file=}')
+    # pycom = ".".join((sources['name'],) + os.path.split(sources['report_relative_location'])[1:-1] + (os.path.basename(gscript),))
+    pycom = "python3 -m " + pycom # pycom[:-3]
+    print(f"{pycom=}")
+    # tmp_path = os.path.abspath(host_tmp_dir).replace("\\", "/")
+    # dcom = f"docker run -v {tmp_path}:/app {dockname} {pycom}"
+    # cdcom = f"cd {os.path.dirname(Dockerfile_location)}"
+    # fcom = f"{cdcom}  && {dcom}"
+    # print("> Running docker command")
+    # print(fcom)
+
+    # thtools.execute_command(fcom.split())
+    # get token file:
+
+    # The grade script writes its .token file next to itself; return a glob pattern for it.
+    token_location = host_tmp_dir + "/" + os.path.dirname(grade_file_relative_destination) + "/*.token"
+
+    elapsed = time.time() - start
+    print("student_token_file_runner: setup took", elapsed, "seconds")
+    return pycom, token_location
+
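+# A minimal usage sketch of student_token_file_runner (the paths below are hypothetical;
+# the deploy scripts elsewhere in this patch are the real callers):
+#
+#   pycom, token_location = student_token_file_runner(
+#       host_tmp_dir="tmp/cs103",
+#       student_token_file="Report3_handin_20_of_20.token",
+#       instructor_grade_script="report3_grade.py",
+#       grade_file_relative_destination="cs103/report3_grade.py")
+#   # ... run pycom inside the container, then collect the produced token:
+#   tokens = glob.glob(token_location)
+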
 def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file, ReportClass=None, instructor_grade_script=None):
     """
     This thingy works:
-- 
GitLab