diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..335ea9d070ad1c319906aeff798584ded23c7387
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2018 The Python Packaging Authority
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
index 410439eb9bb2c7e48e453b3c79d9e5c2eef7a2a3..577ac9a6acf40cc39b136eabbcf823ae0158dd95 100644
--- a/README.md
+++ b/README.md
@@ -55,41 +55,48 @@ if __name__ == "__main__":
 ```
 ### The test:
 The test consists of individual problems and a report-class. The tests themselves are just regular Unittest (we will see a slightly smarter idea in a moment). For instance:
+
 ```python
-from homework1 import reverse_list, add
+from looping import reverse_list, add
 import unittest
+
 class Week1(unittest.TestCase):
     def test_add(self):
-        self.assertEqual(add(2,2), 4)
+        self.assertEqual(add(2, 2), 4)
         self.assertEqual(add(-100, 5), -95)
 
     def test_reverse(self):
-        self.assertEqual(reverse_list([1,2,3]), [3,2,1])
+        self.assertEqual(reverse_list([1, 2, 3]), [3, 2, 1])
 ```
 A number of tests can be collected into a `Report`, which will allow us to assign points to the tests and use the more advanced features of the framework later. A complete, minimal example:
 ```python
-from unitgrade2.unitgrade2 import Report
-from unitgrade2.unitgrade_helpers2 import evaluate_report_student
-from homework1 import reverse_list, add
+from src.unitgrade2.unitgrade2 import Report
+from src.unitgrade2 import evaluate_report_student
+from looping import reverse_list, add
 import unittest
+
 class Week1(unittest.TestCase):
     def test_add(self):
-        self.assertEqual(add(2,2), 4)
+        self.assertEqual(add(2, 2), 4)
         self.assertEqual(add(-100, 5), -95)
 
     def test_reverse(self):
-        self.assertEqual(reverse_list([1,2,3]), [3,2,1])
+        self.assertEqual(reverse_list([1, 2, 3]), [3, 2, 1])
+
 import cs101
+
+
 class Report1(Report):
     title = "CS 101 Report 1"
     questions = [(Week1, 10)] # Include a single question for 10 credits.
     pack_imports = [cs101]
+
 if __name__ == "__main__":
     # Uncomment to simply run everything as a unittest:
     # unittest.main(verbosity=2)
@@ -109,7 +116,7 @@ if __name__ == "__main__":
     setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
 
     # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
-    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
+    snip_dir.snip_dir(source_dir="../programs", dest_dir="../../students/programs", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
 ```
 
 - The first line creates the `report1_grade.py` script and any additional data files needed by the tests (none in this case)
@@ -193,15 +200,18 @@ also that the students implementations didn't just detect what input was being u
 return the correct answer. To do that you need hidden tests and external validation.
 
 Our new testclass looks like this:
+
 ```python
-from unitgrade2.unitgrade2 import UTestCase, Report, hide
-from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+from src.unitgrade2.unitgrade2 import UTestCase, Report, hide
+from src.unitgrade2 import evaluate_report_student
+
 class Week1(UTestCase):
     """ The first question for week 1. """
+
     def test_add(self):
         from cs103.homework1 import add
-        self.assertEqualC(add(2,2))
+        self.assertEqualC(add(2, 2))
         self.assertEqualC(add(-100, 5))
 
     @hide
@@ -209,14 +219,18 @@ class Week1(UTestCase):
         # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test.
         # See the output in the student directory for more information.
         from cs103.homework1 import add
-        self.assertEqualC(add(2,2))
+        self.assertEqualC(add(2, 2))
+
 import cs103
+
+
 class Report3(Report):
     title = "CS 101 Report 3"
     questions = [(Week1, 20)] # Include a single question for 10 credits.
     pack_imports = [cs103]
+
 if __name__ == "__main__":
     evaluate_report_student(Report3())
 ```
diff --git a/autolab/__pycache__/autolab.cpython-38.pyc b/autolab/__pycache__/autolab.cpython-38.pyc
deleted file mode 100644
index 86ad4a1d06d4f3267ad22b37b7839b186984db04..0000000000000000000000000000000000000000
Binary files a/autolab/__pycache__/autolab.cpython-38.pyc and /dev/null differ
diff --git a/autolab/report_autolab.py b/autolab/report_autolab.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/build.md b/build.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ba07379ee4d13fed73097fac750846a1db4b30a
--- /dev/null
+++ b/build.md
@@ -0,0 +1,18 @@
+# Unitgrade build info
+
+See https://packaging.python.org/tutorials/packaging-projects/
+
+- Build the distribution package using:
+```
+py -m pip install --upgrade build && py -m build
+```
+- Upload to test repo
+```
+py -m pip install --upgrade twine && py -m twine upload --repository testpypi dist/*
+```
+
+### build and upload (to actual pypi; remember the .pypi token.
you can find it in personal dtu repo) +``` +rm -f dists/* && py -m build && twine upload dist/* +``` + diff --git a/cs202courseware/ascimenu.py b/cs202courseware/ascimenu.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f15615b911bf1a9386ed863e1b615e61127bcd --- /dev/null +++ b/cs202courseware/ascimenu.py @@ -0,0 +1,43 @@ +from prompt_toolkit.application import Application +from prompt_toolkit.application.current import get_app +from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous +from prompt_toolkit.layout import Dimension, HSplit, Layout, ScrollablePane +from prompt_toolkit.widgets import Frame, Label, TextArea + +print("hello") + +z = 234 +def main(): + # Create a big layout of many text areas, then wrap them in a `ScrollablePane`. + root_container = Frame( + ScrollablePane( + HSplit( + [ + Frame(TextArea(text=f"label-{i}"), width=Dimension()) + for i in range(20) + ] + ) + ) + # ScrollablePane(HSplit([TextArea(text=f"label-{i}") for i in range(20)])) + ) + + layout = Layout(container=root_container) + + # Key bindings. + kb = KeyBindings() + + @kb.add("c-c") + def exit(event) -> None: + get_app().exit() + + kb.add("down")(focus_next) + kb.add("up")(focus_previous) + + # Create and run application. + application = Application(layout=layout, key_bindings=kb, full_screen=True) + application.run() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cs202courseware/ug2report1.py b/cs202courseware/ug2report1.py index 586a004b82244f295d2a44495f581055f2ea473b..e0e7e0536f17bf4a679b7dd016205a95f06391cc 100644 --- a/cs202courseware/ug2report1.py +++ b/cs202courseware/ug2report1.py @@ -1,16 +1,13 @@ # from unitgrade.unitgrade import QuestionGroup, Report, QPrintItem # from unitgrade.unitgrade_helpers import evaluate_report_student -from cs202courseware import homework1 -import unittest - -from unitgrade2.unitgrade2 import wrapper, UTestCase, cache +from src.unitgrade2.unitgrade2 import UTestCase, cache from unittest import TestCase # from unitgrade2.unitgrade2 import cache -from unitgrade2.unitgrade2 import methodsWithDecorator, hide +from src.unitgrade2.unitgrade2 import hide import random -from cs202courseware.homework1 import reverse_list, my_sum +from cs202courseware.homework1 import my_sum class TestPartial(TestCase): def test_a(self): @@ -83,7 +80,7 @@ class ListQuestion(UTestCase): """ ccc test_integers-short """ self.assertEqual(2,2) -from unitgrade2.unitgrade2 import Report +from src.unitgrade2.unitgrade2 import Report class Report1(Report): title = "CS 202 Report 1" @@ -97,7 +94,6 @@ class Report1(Report): pack_imports = [cs202courseware] # Include this file in .token file a = 234 -import coverage if __name__ == "__main__": """ @@ -113,7 +109,7 @@ if __name__ == "__main__": # print(inspect.getsourcelines(f) ) # How to get all hidden questions. 
- from unitgrade2.unitgrade_helpers2 import evaluate_report_student + from src.unitgrade2 import evaluate_report_student # cov = coverage.Coverage() # cov.start() diff --git a/cs202courseware/ug2report1_nohidden.py b/cs202courseware/ug2report1_nohidden.py index 180417c0ab14fe0e2089db4d64c0a2ef744d3140..3e94a57bb6a047e242a83e72560d7b617f90202d 100644 --- a/cs202courseware/ug2report1_nohidden.py +++ b/cs202courseware/ug2report1_nohidden.py @@ -1,17 +1,14 @@ # from unitgrade.unitgrade import QuestionGroup, Report, QPrintItem # from unitgrade.unitgrade_helpers import evaluate_report_student -from cs202courseware import homework1 -import unittest - -from unitgrade2.unitgrade2 import wrapper, UTestCase, cache +from src.unitgrade2.unitgrade2 import UTestCase, cache # from unitgrade2.unitgrade2 import cache -from unitgrade2.unitgrade2 import methodsWithDecorator, hide +from src.unitgrade2.unitgrade2 import methodsWithDecorator, hide import random -from cs101courseware_example.homework1 import reverse_list, my_sum +from cs101courseware_example.homework1 import my_sum class GeneratorQuestion(UTestCase): def genTest(self, n): @@ -57,7 +54,7 @@ class ListQuestion(UTestCase): """ ccc test_integers-short """ self.assertEqual(2,2) -from unitgrade2.unitgrade2 import Report +from src.unitgrade2.unitgrade2 import Report class Report1(Report): title = "CS 202 Report 1" @@ -103,7 +100,7 @@ if __name__ == "__main__": for f in ls: print(inspect.getsourcelines(f) ) # How to get all hidden questions. - from unitgrade2.unitgrade_helpers2 import evaluate_report_student + from src.unitgrade2 import evaluate_report_student evaluate_report_student( Report1() ) diff --git a/dist/unitgrade-devel-0.0.1.tar.gz b/dist/unitgrade-devel-0.0.1.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9a8b82a9d4e5c3c61bd6ced6cace4b2a6bfb40b9 Binary files /dev/null and b/dist/unitgrade-devel-0.0.1.tar.gz differ diff --git a/dist/unitgrade_devel-0.0.1-py3-none-any.whl b/dist/unitgrade_devel-0.0.1-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..6e566dfa026e00890940fe2102371f73794be2be Binary files /dev/null and b/dist/unitgrade_devel-0.0.1-py3-none-any.whl differ diff --git a/autolab/docker_tango_python/Dockerfile b/docker_images/docker_tango_python/Dockerfile similarity index 100% rename from autolab/docker_tango_python/Dockerfile rename to docker_images/docker_tango_python/Dockerfile diff --git a/autolab/docker_tango_python/requirements.txt b/docker_images/docker_tango_python/requirements.txt similarity index 86% rename from autolab/docker_tango_python/requirements.txt rename to docker_images/docker_tango_python/requirements.txt index 9db612034545e9fe2987891d5083398a6e3958c6..0a73d686e2bf43229199808a9ffaa11c00d85f8a 100644 --- a/autolab/docker_tango_python/requirements.txt +++ b/docker_images/docker_tango_python/requirements.txt @@ -4,3 +4,4 @@ jinja2 tabulate compress_pickle pyfiglet +colorama \ No newline at end of file diff --git a/examples/example_docker/instructor/unitgrade-docker/Dockerfile b/docker_images/unitgrade-docker/Dockerfile similarity index 93% rename from examples/example_docker/instructor/unitgrade-docker/Dockerfile rename to docker_images/unitgrade-docker/Dockerfile index 08764b54174765afbc4c666a85f2ffaed1f37103..98a40077104bfe3274ee00062beac6769934e1a4 100644 --- a/examples/example_docker/instructor/unitgrade-docker/Dockerfile +++ b/docker_images/unitgrade-docker/Dockerfile @@ -5,7 +5,7 @@ FROM python:3.8-slim-buster RUN apt-get -y update RUN apt-get -y 
install git -WORKDIR /app +WORKDIR /home # Remember to include requirements. COPY requirements.txt requirements.txt @@ -16,6 +16,6 @@ RUN pip3 install -r requirements.txt COPY . . -ADD . /app +ADD . /home # CMD [ "python3", "app.py"] diff --git a/docker_images/unitgrade-docker/home/cs103/Report3_handin_5_of_30.token b/docker_images/unitgrade-docker/home/cs103/Report3_handin_5_of_30.token new file mode 100644 index 0000000000000000000000000000000000000000..a861bfbfbcc029d09e49a0a278598a0e2355ee66 Binary files /dev/null and b/docker_images/unitgrade-docker/home/cs103/Report3_handin_5_of_30.token differ diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc b/docker_images/unitgrade-docker/home/cs103/__pycache__/homework1.cpython-38.pyc similarity index 73% rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc rename to docker_images/unitgrade-docker/home/cs103/__pycache__/homework1.cpython-38.pyc index 6403436716a672ff6c51a7026fc0edf847aa3213..d57142a7eeb21f6b172586b47613efbfeadc1082 100644 Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/homework1.cpython-38.pyc and b/docker_images/unitgrade-docker/home/cs103/__pycache__/homework1.cpython-38.pyc differ diff --git a/docker_images/unitgrade-docker/home/cs103/__pycache__/report3_complete_grade.cpython-38.pyc b/docker_images/unitgrade-docker/home/cs103/__pycache__/report3_complete_grade.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa3a3187faae8d51a53e55a82bd75e0521299a19 Binary files /dev/null and b/docker_images/unitgrade-docker/home/cs103/__pycache__/report3_complete_grade.cpython-38.pyc differ diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py b/docker_images/unitgrade-docker/home/cs103/homework1.py similarity index 100% rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/homework1.py rename to docker_images/unitgrade-docker/home/cs103/homework1.py diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py b/docker_images/unitgrade-docker/home/cs103/report3.py similarity index 74% rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py rename to docker_images/unitgrade-docker/home/cs103/report3.py index c97b5a4117c254a17a5fed6787a485f4e69e0ebf..f83bb5384fa32a708a2be59d120813fd92bdc9ef 100644 --- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3.py +++ b/docker_images/unitgrade-docker/home/cs103/report3.py @@ -1,8 +1,8 @@ """ Example student code. This file is automatically generated from the files in the instructor-directory """ -from unitgrade2.unitgrade2 import UTestCase, Report, hide -from unitgrade2.unitgrade_helpers2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import UTestCase, Report +from src.unitgrade2 import evaluate_report_student class Week1(UTestCase): """ The first question for week 1. 
""" @@ -24,4 +24,6 @@ class Report3(Report): pack_imports = [cs103] if __name__ == "__main__": + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet(Report3()) evaluate_report_student(Report3()) diff --git a/docker_images/unitgrade-docker/home/cs103/report3_complete_grade.py b/docker_images/unitgrade-docker/home/cs103/report3_complete_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea5f2e975d3a8e8f50c6047888728d222e800b4 --- /dev/null +++ b/docker_images/unitgrade-docker/home/cs103/report3_complete_grade.py @@ -0,0 +1,338 @@ + +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.normpath(os.path.join(output_dir, token)) + + + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if 
verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += 
b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. """\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n @hide\n def test_add_hidden(self):\n # This is a hidden test. 
The @hide-decorator will allow unitgrade to remove the test.\n # See the output in the student directory for more information.\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n @hide\n def test_hidden_fail(self):\n self.assertEqual(2,3)\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049589000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b047568018c0f746573745f6164645f68696464656e948694680586947d944b004b04738c0474696d6594473fe3b8a400000000758c0d4175746f6d6174696350617373947d94680c473fc45a520000000073752e' +name="Report3" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/docker_images/unitgrade-docker/home/cs103/report3_grade.py b/docker_images/unitgrade-docker/home/cs103/report3_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..3c64c04d0172461f76215a8826a27e68a4d7071e --- /dev/null +++ b/docker_images/unitgrade-docker/home/cs103/report3_grade.py @@ -0,0 +1,340 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." 
in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. \nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.normpath(os.path.join(output_dir, token)) + + + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if 
verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += 
b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049568000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04758c0474696d6594473fb71ac800000000758c0d4175746f6d6174696350617373947d946808473fb127100000000073752e' +name="Report3" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) diff --git a/examples/example_docker/instructor/unitgrade-docker/requirements.txt b/docker_images/unitgrade-docker/requirements.txt similarity index 86% rename from examples/example_docker/instructor/unitgrade-docker/requirements.txt rename to docker_images/unitgrade-docker/requirements.txt index 9db612034545e9fe2987891d5083398a6e3958c6..0a73d686e2bf43229199808a9ffaa11c00d85f8a 100644 --- a/examples/example_docker/instructor/unitgrade-docker/requirements.txt +++ b/docker_images/unitgrade-docker/requirements.txt @@ -4,3 +4,4 @@ jinja2 tabulate compress_pickle pyfiglet +colorama \ No newline at end of file diff --git a/docker_images/unitgrade-docker/tmp/cs103/homework1.py b/docker_images/unitgrade-docker/tmp/cs103/homework1.py new file mode 100644 index 0000000000000000000000000000000000000000..3543f1ba46b63eec3a2c2e007ee998660c7136c6 --- /dev/null +++ b/docker_images/unitgrade-docker/tmp/cs103/homework1.py @@ -0,0 +1,21 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +def reverse_list(mylist): + """ + Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g. + reverse_list([1,2,3]) should return [3,2,1] (as a list). + """ + # TODO: 1 lines missing. + raise NotImplementedError("Implement function body") + +def add(a,b): + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ + # TODO: 1 lines missing. + raise NotImplementedError("Implement function body") + +if __name__ == "__main__": + # Problem 1: Write a function which add two numbers + print(f"Your result of 2 + 2 = {add(2,2)}") + print(f"Reversing a small list", reverse_list([2,3,5,7])) diff --git a/docker_images/unitgrade-docker/tmp/cs103/report3.py b/docker_images/unitgrade-docker/tmp/cs103/report3.py new file mode 100644 index 0000000000000000000000000000000000000000..f83bb5384fa32a708a2be59d120813fd92bdc9ef --- /dev/null +++ b/docker_images/unitgrade-docker/tmp/cs103/report3.py @@ -0,0 +1,29 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +from src.unitgrade2.unitgrade2 import UTestCase, Report +from src.unitgrade2 import evaluate_report_student + +class Week1(UTestCase): + """ The first question for week 1. """ + def test_add(self): + from cs103.homework1 import add + self.assertEqualC(add(2,2)) + self.assertEqualC(add(-100, 5)) + + +class AutomaticPass(UTestCase): + def test_student_passed(self): + self.assertEqual(2,2) + + +import cs103 +class Report3(Report): + title = "CS 101 Report 3" + questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits. 
+ pack_imports = [cs103] + +if __name__ == "__main__": + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet(Report3()) + evaluate_report_student(Report3()) diff --git a/docker_images/unitgrade-docker/tmp/cs103/report3_complete_grade.py b/docker_images/unitgrade-docker/tmp/cs103/report3_complete_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..1101b26d27b6a3a303d7196df758443c81961368 --- /dev/null +++ b/docker_images/unitgrade-docker/tmp/cs103/report3_complete_grade.py @@ -0,0 +1,338 @@ + +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.normpath(os.path.join(output_dir, token)) + + + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if 
verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += 
b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. """\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n @hide\n def test_add_hidden(self):\n # This is a hidden test. 
The @hide-decorator will allow unitgrade to remove the test.\n # See the output in the student directory for more information.\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n @hide\n def test_hidden_fail(self):\n self.assertEqual(2,3)\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049589000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b047568018c0f746573745f6164645f68696464656e948694680586947d944b004b04738c0474696d6594473fda6e8700000000758c0d4175746f6d6174696350617373947d94680c473fb8d5140000000073752e' +name="Report3" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/docker_images/unitgrade-docker/tmp/cs103/report3_grade.py b/docker_images/unitgrade-docker/tmp/cs103/report3_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..85573c9c2dae744775132c6599f9b29d04fd0ac5 --- /dev/null +++ b/docker_images/unitgrade-docker/tmp/cs103/report3_grade.py @@ -0,0 +1,340 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." 
in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. \nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.normpath(os.path.join(output_dir, token)) + + + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if 
verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += 
b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049568000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04758c0474696d6594473fb1eb1c00000000758c0d4175746f6d6174696350617373947d946808473fa78d300000000073752e' +name="Report3" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) diff --git a/examples/02471/instructor/02471/report1.py b/examples/02471/instructor/02471/report1.py index 1ed131f5a1e075bba9bc14c97d92df21e7670081..96bb9522044894586ef6574639bd04d019d43e8b 100644 --- a/examples/02471/instructor/02471/report1.py +++ b/examples/02471/instructor/02471/report1.py @@ -114,7 +114,7 @@ if __name__ == "__main__": # from week02 import Week_2_sol import importnb - file = "week02/week2.ipynb" + file = "../../../example_jupyter/instructor/cs105/week2.ipynb" file2 = 'week02/Week_2_sol.ipynb' m = importnb.Notebook.load(file) # importnb.Notebook.l diff --git a/examples/02631/instructor/programs/.coverage b/examples/02631/instructor/programs/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..11b2ec620c9b142ae8ee79c7a6f5afa297121530 Binary files /dev/null and b/examples/02631/instructor/programs/.coverage differ diff --git a/examples/02631/instructor/programs/__pycache__/looping.cpython-38.pyc b/examples/02631/instructor/programs/__pycache__/looping.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa302e5a102c505ba4b62aa5eab3205ca2683298 Binary files /dev/null and b/examples/02631/instructor/programs/__pycache__/looping.cpython-38.pyc differ diff --git a/examples/02631/instructor/programs/__pycache__/report1intro.cpython-38.pyc b/examples/02631/instructor/programs/__pycache__/report1intro.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddadca6243eeebd021c63d8d875e9092033e1190 Binary files /dev/null and b/examples/02631/instructor/programs/__pycache__/report1intro.cpython-38.pyc differ diff --git a/examples/02631/instructor/programs/deploy.py b/examples/02631/instructor/programs/deploy.py new file mode 100644 index 0000000000000000000000000000000000000000..6099792b035c235a9110c870e889c1dcd1f4c039 --- /dev/null +++ b/examples/02631/instructor/programs/deploy.py @@ -0,0 +1,62 @@ +from report1intro import Report1Flat +from unitgrade_private2.hidden_create_files import setup_grade_file_report +from snipper import snip_dir + +if __name__ == "__main__": + setup_grade_file_report(Report1Flat, minify=False, obfuscate=False, execute=False, with_coverage=True) + + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet((Report1Flat())) + + # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper + snip_dir.snip_dir(source_dir="", dest_dir="../../students/programs", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) + + import os + os.system("python report1intro_grade.py") + + +""" +from coverage import CoverageData +import coverage +cov2 = coverage.Coverage() + + def 
setUp(self): + import trace + self.cov = cov2 + self.cov.start() + + + + # self.tracer.start() + + # using obj_to_trace + + def tearDown(self) -> None: + + self.cov.stop() + print() + + data = CoverageData() + + # data.measured_files() + # data.lines() + data = self.cov.get_data() + # data. + for file in data.measured_files(): + print(file) + print(data.lines(file)) + print(data.arcs(file)) + print( data.contexts_by_lineno(file)) + + # print(data[file]) + + + +- Idea: Measure coverage in setup/teardown. This gives a handful of covered lines. +- During setup, supply a dicionary to UTestCase of files, along with the lines that are removed. +- When running setup: Take the coverage report, and compare against files. Write functions/lines encountered to the cache dictionary. Rquires you to +- inspect the functions that are edited to figure out what is removed. This can probably be done by going upwars towards the first sensible class or function definition (which has not been removed). +- Supply a dictionary to UTestCase of files, along with the lines edited. Allow UTestCase to write this information to the + cache dictionary (i.e. lines removed). Then use this information when displaying helpful hints later. + +""" \ No newline at end of file diff --git a/examples/02631/instructor/programs/looping.py b/examples/02631/instructor/programs/looping.py new file mode 100644 index 0000000000000000000000000000000000000000..59a1485204b3c5fbf7c8e7e5d86531fa71c39d21 --- /dev/null +++ b/examples/02631/instructor/programs/looping.py @@ -0,0 +1,64 @@ +import numpy as np +import itertools + +def bacteriaGrowth(n0, alpha, K, N): #!f + """ + Calculate time until bacteria growth exceed N starting from a population of n0 bacteria. + hints: + * consider n0 + * alpha > 0 + :param n0: + :param alpha: + :param K: + :param N: + :return: + """ + if n0 > N: + return 0 + for t in itertools.count(): + n0 = (1 + alpha * (1-n0 / K) ) * n0 + if n0 > N: + break + return t+1 + +def clusterAnalysis(reflectance): + reflectance = np.asarray(reflectance) + I1 = np.arange(len(reflectance)) % 2 == 1 + while True: + m = np.asarray( [np.mean( reflectance[~I1] ), np.mean( reflectance[I1] ) ] ) + I1_ = np.argmin( np.abs( reflectance[:, np.newaxis] - m[np.newaxis, :] ), axis=1) == 1 + if all(I1_ == I1): + break + I1 = I1_ + return I1 + 1 + +def fermentationRate(measuredRate, lowerBound, upperBound): + # Insert your code here + return np.mean( [r for r in measuredRate if lowerBound < r < upperBound] ) + + + + +def removeIncomplete(id): + """ Hints: + * Take a look at the example in the exercise. 
+ """ + id = np.asarray(id) + id2 = [] + for i, v in enumerate(id): + if len( [x for x in id if int(x) == int(v) ] ) == 3: + id2.append(v) + return np.asarray(id2) + + +if __name__ == "__main__": + # I = clusterAnalysis([1.7, 1.6, 1.3, 1.3, 2.8, 1.4, 2.8, 2.6, 1.6, 2.7]) + # print(I) + + print(fermentationRate(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25)) + + + # print(removeIncomplete(np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]))) + + # Problem 1: Write a function which add two numbers + # clusterAnalysis([2, 1, 2, 4, 5]) \ No newline at end of file diff --git a/examples/02631/instructor/programs/report1intro.py b/examples/02631/instructor/programs/report1intro.py new file mode 100644 index 0000000000000000000000000000000000000000..10b189828bf2a25d5b6bfbf46efb233c51bd5d8f --- /dev/null +++ b/examples/02631/instructor/programs/report1intro.py @@ -0,0 +1,139 @@ +from src.unitgrade2.unitgrade2 import Report, UTestCase, cache +from src.unitgrade2 import evaluate_report_student +import numpy as np +import looping +from looping import bacteriaGrowth, clusterAnalysis, removeIncomplete, fermentationRate + +def trlist(x): + s = str(list(x)) + if len(s) > 30: + s = s[:30] + "...]" + return s + +class Bacteria(UTestCase): + """ Bacteria growth rates """ + + def stest(self, n0, alpha, K, N): + g = bacteriaGrowth(n0=n0, alpha=alpha, K=K, N=N) + self.title = f"bacteriaGrowth({n0}, {alpha}, {K}, {N}) = {g} ?" + self.assertEqualC(g) + + def test_growth1(self): + """ Hints: + * Make sure to frobulate the frobulator. + """ + self.stest(100, 0.4, 1000, 500) + + def test_growth2(self): + self.stest(10, 0.4, 1000, 500) + + def test_growth3(self): + self.stest(100, 1.4, 1000, 500) + + def test_growth4(self): + self.stest(100, 0.0004, 1000, 500) + + def test_growth5(self): + """ + hints: + * What happens when n0 > N? (in this case return t=0) """ + self.stest(100, 0.4, 1000, 99) + +class ClusterAnalysis(UTestCase): + """ Test the cluster analysis method """ + + def stest(self, n, seed): + np.random.seed(seed) + x = np.round(np.random.rand(n), 1) + I = clusterAnalysis(x) + self.title = f"clusterAnalysis({list(x)}) = {list(I)} ?" + self.assertEqualC(list(I)) + + def test_cluster1(self): + """ Hints: + * Make sure to frobulate the frobulator. + * Just try harder + """ + self.stest(3, 10) + + def test_cluster2(self): + self.stest(4, 146) + + def test_cluster3(self): + self.stest(5, 12) + + def test_cluster4(self): + """ + Cluster analysis for tied lists + Hints: + * It may be that an observations has the same distance to the two clusters. Where do you assign it in this case? + """ + x = np.array([10.0, 12.0, 10.0, 12.0, 9.0, 11.0, 11.0, 13.0]) + self.assertEqualC(list(clusterAnalysis(x) ) ) + + +class RemoveIncomplete(UTestCase): + """ Remove incomplete IDs """ + + def stest(self, x): + I = list( removeIncomplete(x) ) + self.title = f"removeId({trlist(x)}) = {trlist(I)} ?" 
+ self.assertEqualC(I) + + @cache + def rseq(self, max, n): + np.random.seed(42) + return np.random.randint(max, size=(n,) ) + (np.random.randint(2, size=(n,) )+1)/10 + + def test_incomplete1(self): + self.stest( np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]) ) + + def test_incomplete2(self): + self.stest( np.array([1.1, 1.2, 1.3, 2.1, 2.2, 2.3]) ) + + def test_incomplete3(self): + self.stest(np.array([5.1, 5.2, 4.1, 4.3, 4.2, 8.1, 8.2, 8.3]) ) + + def test_incomplete4(self): + self.stest(np.array([1.1, 1.3, 2.1, 2.2, 3.1, 3.3, 4.1, 4.2, 4.3]) ) + + def test_incomplete5(self): + self.stest(self.rseq(10, 40)) + + +class FermentationRate(UTestCase): + """ Test the fermentation rate question """ + + def stest(self, x, lower, upper): + I = fermentationRate(x, lower, upper) + s = trlist(x) + self.title = f"fermentationRate({s}, {lower}, {upper}) = {I:.3f} ?" + self.assertEqualC(I) + + @cache + def rseq(self, max, n): + np.random.seed(42) + return np.random.randint(max, size=(n,) ) + (np.random.randint(3, size=(n,) )+1)/n + + def test_rate1(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25) + + def test_rate2(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 1, 200) + + def test_rate3(self): + self.stest(np.array([1.75]), 1, 2) + + def test_rate4(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 18.2, 20) + + +class Report1Flat(Report): + title = "Week 4: Looping" + questions = [(ClusterAnalysis, 10), (RemoveIncomplete, 10), (Bacteria, 10), (FermentationRate, 10),] + pack_imports = [looping] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Flat()) diff --git a/examples/02631/instructor/programs/report1intro_grade.py b/examples/02631/instructor/programs/report1intro_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..4381d552613c3e0225f52877a920e81455de65a1 --- /dev/null +++ b/examples/02631/instructor/programs/report1intro_grade.py @@ -0,0 +1,337 @@ + +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) 
commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. \nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + # nL = + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else 
q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. + UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f"Question {n+1} total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, 
dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + 
+report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# 
@classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n 
self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n # nL =\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + 
", "+ plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport numpy as np\nimport looping\nfrom looping import bacteriaGrowth, clusterAnalysis, removeIncomplete, fermentationRate\n\ndef trlist(x):\n s = str(list(x))\n if len(s) > 30:\n s = s[:30] + "...]"\n return s\n\nclass Bacteria(UTestCase):\n """ Bacteria growth rates """\n\n def stest(self, n0, alpha, K, N):\n g = bacteriaGrowth(n0=n0, alpha=alpha, K=K, N=N)\n self.title = f"bacteriaGrowth({n0}, {alpha}, {K}, {N}) = {g} ?"\n self.assertEqualC(g)\n\n def test_growth1(self):\n 
""" Hints:\n * Make sure to frobulate the frobulator.\n """\n self.stest(100, 0.4, 1000, 500)\n\n def test_growth2(self):\n self.stest(10, 0.4, 1000, 500)\n\n def test_growth3(self):\n self.stest(100, 1.4, 1000, 500)\n\n def test_growth4(self):\n self.stest(100, 0.0004, 1000, 500)\n\n def test_growth5(self):\n """\n hints:\n * What happens when n0 > N? (in this case return t=0) """\n self.stest(100, 0.4, 1000, 99)\n\nclass ClusterAnalysis(UTestCase):\n """ Test the cluster analysis method """\n\n def stest(self, n, seed):\n np.random.seed(seed)\n x = np.round(np.random.rand(n), 1)\n I = clusterAnalysis(x)\n self.title = f"clusterAnalysis({list(x)}) = {list(I)} ?"\n self.assertEqualC(list(I))\n\n def test_cluster1(self):\n """ Hints:\n * Make sure to frobulate the frobulator.\n * Just try harder\n """\n self.stest(3, 10)\n\n def test_cluster2(self):\n self.stest(4, 146)\n\n def test_cluster3(self):\n self.stest(5, 12)\n\n def test_cluster4(self):\n """\n Cluster analysis for tied lists\n Hints:\n * It may be that an observations has the same distance to the two clusters. Where do you assign it in this case?\n """\n x = np.array([10.0, 12.0, 10.0, 12.0, 9.0, 11.0, 11.0, 13.0])\n self.assertEqualC(list(clusterAnalysis(x) ) )\n\n\nclass RemoveIncomplete(UTestCase):\n """ Remove incomplete IDs """\n\n def stest(self, x):\n I = list( removeIncomplete(x) )\n self.title = f"removeId({trlist(x)}) = {trlist(I)} ?"\n self.assertEqualC(I)\n\n @cache\n def rseq(self, max, n):\n np.random.seed(42)\n return np.random.randint(max, size=(n,) ) + (np.random.randint(2, size=(n,) )+1)/10\n\n def test_incomplete1(self):\n self.stest( np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]) )\n\n def test_incomplete2(self):\n self.stest( np.array([1.1, 1.2, 1.3, 2.1, 2.2, 2.3]) )\n\n def test_incomplete3(self):\n self.stest(np.array([5.1, 5.2, 4.1, 4.3, 4.2, 8.1, 8.2, 8.3]) )\n\n def test_incomplete4(self):\n self.stest(np.array([1.1, 1.3, 2.1, 2.2, 3.1, 3.3, 4.1, 4.2, 4.3]) )\n\n def test_incomplete5(self):\n self.stest(self.rseq(10, 40))\n\n\nclass FermentationRate(UTestCase):\n """ Test the fermentation rate question """\n\n def stest(self, x, lower, upper):\n I = fermentationRate(x, lower, upper)\n s = trlist(x)\n self.title = f"fermentationRate({s}, {lower}, {upper}) = {I:.3f} ?"\n self.assertEqualC(I)\n\n @cache\n def rseq(self, max, n):\n np.random.seed(42)\n return np.random.randint(max, size=(n,) ) + (np.random.randint(3, size=(n,) )+1)/n\n\n def test_rate1(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25)\n\n def test_rate2(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 1, 200)\n\n def test_rate3(self):\n self.stest(np.array([1.75]), 1, 2)\n\n def test_rate4(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 18.2, 20)\n\n\nclass Report1Flat(Report):\n title = "Week 4: Looping"\n questions = [(ClusterAnalysis, 10), (RemoveIncomplete, 10), (Bacteria, 10), (FermentationRate, 10),]\n pack_imports = [looping]' +report1_payload = 
'80049592150000000000007d94288c0f436c7573746572416e616c79736973947d94288c0f436c7573746572416e616c79736973948c0d746573745f636c7573746572319486948c057469746c659486948c2e636c7573746572416e616c79736973285b302e382c20302e302c20302e365d29203d205b312c20322c20315d203f946803680486948c066173736572749486947d944b005d94288c156e756d70792e636f72652e6d756c74696172726179948c067363616c61729493948c056e756d7079948c0564747970659493948c02693494898887945294284b038c013c944e4e4e4affffffff4affffffff4b007494624304010000009486945294681068164304020000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657232948694680686948c36636c7573746572416e616c79736973285b302e352c20302e362c20302e332c20302e335d29203d205b322c20322c20312c20315d203f94680368228694680a86947d944b005d9428681068164304020000009486945294681068164304020000009486945294681068164304010000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657233948694680686948c3e636c7573746572416e616c79736973285b302e322c20302e372c20302e332c20302e352c20302e305d29203d205b312c20322c20312c20322c20315d203f94680368368694680a86947d944b005d9428681068164304010000009486945294681068164304020000009486945294681068164304010000009486945294681068164304020000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657234948694680a86947d944b005d942868106816430401000000948694529468106816430402000000948694529468106816430401000000948694529468106816430402000000948694529468106816430401000000948694529468106816430401000000948694529468106816430401000000948694529468106816430402000000948694529465738c0474696d6594473fdf6c8500000000758c1052656d6f7665496e636f6d706c657465947d94288c1052656d6f7665496e636f6d706c657465948c10746573745f696e636f6d706c657465319486948c057469746c659486948c5372656d6f76654964285b312e332c20322e322c20322e332c20342e322c20352e312c20332e322c2e2e2e5d29203d205b322e322c20322e332c20352e312c20332e322c20352e332c20332e332c2e2e2e5d203f94686d686e86948c066173736572749486947d944b005d9428681068138c02663894898887945294284b0368174e4e4e4affffffff4affffffff4b0074946243089a9999999999014094869452946810687a4308666666666666024094869452946810687a4308666666666666144094869452946810687a43089a9999999999094094869452946810687a4308333333333333154094869452946810687a43086666666666660a4094869452946810687a4308cdcccccccccc004094869452946810687a4308cdcccccccccc144094869452946810687a4308cdcccccccccc084094869452946573686d8c10746573745f696e636f6d706c65746532948694687086948c4b72656d6f76654964285b312e312c20312e322c20312e332c20322e312c20322e322c20322e335d29203d205b312e312c20312e322c20312e332c20322e312c20322e322c20322e335d203f94686d68978694687486947d944b005d94286810687a43089a9999999999f13f94869452946810687a4308333333333333f33f94869452946810687a4308cdccccccccccf43f94869452946810687a4308cdcccccccccc004094869452946810687a43089a9999999999014094869452946810687a4308666666666666024094869452946573686d8c10746573745f696e636f6d706c65746533948694687086948c4f72656d6f76654964285b352e312c20352e322c20342e312c20342e332c20342e322c20382e312c2e2e2e5d29203d205b342e312c20342e332c20342e322c20382e312c20382e322c20382e335d203f94686d68b18694687486947d944b005d94286810687a4308666666666666104094869452946810687a4308333333333333114094869452946810687a4308cdcccccccccc104094869452946810687a4308333333333333204094869452946810687a4308666666666666204094869452946810687a43089a9999999999204094869452946573686d8c10746573745f696e636f6d706c65746534948694687086948c4072656d6f76654964285b312e312c20312e332c20322e312c20322e322c20332e312c20332e332c2e2e2e5d29203d205b342e312c20342e322c20342e335d203f94686d68cb8694687486947d944b005d
94286810687a4308666666666666104094869452946810687a4308cdcccccccccc104094869452946810687a4308333333333333114094869452946573686d8c10746573745f696e636f6d706c657465359486948c06406361636865948c0472736571948c0966756e63746f6f6c73948c0a5f486173686564536571949394298194284b0a4b28654e7d948c096861736876616c7565948a0884d8ef03874d7f467386946287948694680e8c0c5f7265636f6e73747275637494939468118c076e6461727261799493944b0085944301629487945294284b014b28859468138c02663894898887945294284b0368174e4e4e4affffffff4affffffff4b0074946289424001000066666666666618409a99999999990940cdcccccccccc1c40cdcccccccccc1040cdcccccccccc184033333333333322409a999999999901406666666666661840cdcccccccccc1c40cdcccccccccc10409a999999999909406666666666661c40cdcccccccccc1c40cdcccccccccc0040cdcccccccccc14406666666666661040333333333333f33f6666666666661c406666666666661440333333333333f33f66666666666610409a9999999999c93f6666666666662240cdcccccccccc144066666666666620409a9999999999c93f66666666666622409a99999999990140cdcccccccccc18409a9999999999094066666666666620409a999999999901406666666666661040cdcccccccccc0040cdcccccccccc1840cdcccccccccc10406666666666662040cdcccccccccc1840333333333333f33f9a9999999999094094749462686d68dc8694687086948c5372656d6f76654964285b362e312c20332e322c20372e322c20342e322c20362e322c20392e312c2e2e2e5d29203d205b392e312c20352e322c20312e322c20352e312c20312e322c20392e322c2e2e2e5d203f94686d68dc8694687486947d944b005d94286810687a4308333333333333224094869452946810687a4308cdcccccccccc144094869452946810687a4308333333333333f33f94869452946810687a4308666666666666144094869452946810687a4308333333333333f33f94869452946810687a4308666666666666224094869452946810687a4308cdcccccccccc144094869452946810687a4308666666666666204094869452946810687a4308666666666666224094869452946810687a4308666666666666204094869452946810687a4308666666666666204094869452946810687a4308333333333333f33f94869452946573686a473fcf9dc400000000758c084261637465726961947d94288c084261637465726961948c0c746573745f67726f777468319486948c057469746c659486948c29626163746572696147726f777468283130302c20302e342c20313030302c2035303029203d2037203f946a250100006a2601000086948c066173736572749486947d944b004b07736a250100006a2601000086948c08636f7665726167659486947d94286a250100006a2601000086947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468329486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468339486947d948c0a6c6f6f70696e672e7079947d948c256465662062616374657269614
7726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468349486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468359486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b118ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a202020202222229486947373756a250100006a3a01000086946a2801000086948c29626163746572696147726f7774682831302c20302e342c20313030302c2035303029203d203134203f946a250100006a3a01000086946a2c01000086947d944b004b0e736a250100006a3a01000086946a3001000086946a320100006a250100006a4201000086946a2801000086948c29626163746572696147726f777468283130302c20312e342c20313030302c2035303029203d2033203f946a250100006a4201000086946a2c01000086947d944b004b03736a250100006a4201000086946a3001000086946a320100006a250100006a4a01000086946a2801000086948c2f626163746572696147726f777468283130302c20302e303030342c20313030302c2035303029203d2035343934203f946a250100006a4a01000086946a2c01000086947d944b004d7615736a250100006a4a01000086946a3001000086946a320100006a250100006a5201000086946a2801000086948c28626163746572696147726f777468283130302c20302e342c20313030302c20393929203d2030203f946a250100006a5201000086946a2c01000086947d944b004b00736a250100006a5201000086946a3001000086946a32010000686a473fcf9d9a00000000758c104665726d656e746174696f6e52617465947d94288c104665726d656e746174696f6e52617465948c0a746573745f72617465319486948c057469746c659486948c476665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c2031352c20323529203d2031392e363030203f946a7c0100006a7d01000086948c066173736572749486947d944b006810687a43089a999999999933409486945294736a7c0100008c0a746573745f72617465329486946a7f01000086948c476665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c20312c2032303029203d2032392e393735203f946a7c0100006a8901000086946a8301000086947d944b006810687a43089899999999f93d409486945294736a7c0100008c0a746573745f72617465339486946a7f01000086948c286665726d656e746174696f6e52617465285b312e37355d2c20312c203229203d20312e373530203f946a7c
0100006a9301000086946a8301000086947d944b006810687a4308000000000000fc3f9486945294736a7c0100008c0a746573745f72617465349486946a7f01000086948c496665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c2031382e322c20323029203d2031392e353030203f946a7c0100006a9d01000086946a8301000086947d944b006810687a43080000000000803340948694529473686a473fc74c0a0000000075752e' +name="Report1Flat" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/examples/02631/instructor/programs/unitgrade/Bacteria.pkl b/examples/02631/instructor/programs/unitgrade/Bacteria.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b246df45c63387d95bc69cc26deba7770d8249c5 Binary files /dev/null and b/examples/02631/instructor/programs/unitgrade/Bacteria.pkl differ diff --git a/examples/02631/instructor/programs/unitgrade/ClusterAnalysis.pkl b/examples/02631/instructor/programs/unitgrade/ClusterAnalysis.pkl new file mode 100644 index 0000000000000000000000000000000000000000..36635371f6e86d5f841a3827696a5909e766532b Binary files /dev/null and b/examples/02631/instructor/programs/unitgrade/ClusterAnalysis.pkl differ diff --git a/examples/02631/instructor/programs/unitgrade/FermentationRate.pkl b/examples/02631/instructor/programs/unitgrade/FermentationRate.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9f5cea13c96d67ff2704de398cd66b78a2db463b Binary files /dev/null and b/examples/02631/instructor/programs/unitgrade/FermentationRate.pkl differ diff --git a/examples/02631/instructor/programs/unitgrade/RemoveIncomplete.pkl b/examples/02631/instructor/programs/unitgrade/RemoveIncomplete.pkl new file mode 100644 index 0000000000000000000000000000000000000000..30edf2db7bc0e41f0ada9c1c7443ac1d487a6d9a Binary files /dev/null and b/examples/02631/instructor/programs/unitgrade/RemoveIncomplete.pkl differ diff --git a/examples/02631/students/programs/.coverage b/examples/02631/students/programs/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..11b2ec620c9b142ae8ee79c7a6f5afa297121530 Binary files /dev/null and b/examples/02631/students/programs/.coverage differ diff --git a/examples/02631/students/programs/__pycache__/looping.cpython-38.pyc b/examples/02631/students/programs/__pycache__/looping.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa302e5a102c505ba4b62aa5eab3205ca2683298 Binary files /dev/null and b/examples/02631/students/programs/__pycache__/looping.cpython-38.pyc differ diff --git a/examples/02631/students/programs/__pycache__/report1intro.cpython-38.pyc b/examples/02631/students/programs/__pycache__/report1intro.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddadca6243eeebd021c63d8d875e9092033e1190 Binary files /dev/null and b/examples/02631/students/programs/__pycache__/report1intro.cpython-38.pyc differ diff --git a/examples/02631/students/programs/looping.py b/examples/02631/students/programs/looping.py new file mode 100644 index 0000000000000000000000000000000000000000..34517b12e3e8a4712d6fa94e81d32124e0b5abf6 --- /dev/null +++ b/examples/02631/students/programs/looping.py @@ -0,0 +1,62 @@ +""" +Example student code. 
This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +import itertools + +def bacteriaGrowth(n0, alpha, K, N): + """ + Calculate time until bacteria growth exceed N starting from a population of n0 bacteria. + hints: + * consider n0 + * alpha > 0 + :param n0: + :param alpha: + :param K: + :param N: + :return: + """ + # TODO: 7 lines missing. + raise NotImplementedError("Implement function body") + +def clusterAnalysis(reflectance): + reflectance = np.asarray(reflectance) + I1 = np.arange(len(reflectance)) % 2 == 1 + while True: + m = np.asarray( [np.mean( reflectance[~I1] ), np.mean( reflectance[I1] ) ] ) + I1_ = np.argmin( np.abs( reflectance[:, np.newaxis] - m[np.newaxis, :] ), axis=1) == 1 + if all(I1_ == I1): + break + I1 = I1_ + return I1 + 1 + +def fermentationRate(measuredRate, lowerBound, upperBound): + # Insert your code here + return np.mean( [r for r in measuredRate if lowerBound < r < upperBound] ) + + + + +def removeIncomplete(id): + """ Hints: + * Take a look at the example in the exercise. + """ + id = np.asarray(id) + id2 = [] + for i, v in enumerate(id): + if len( [x for x in id if int(x) == int(v) ] ) == 3: + id2.append(v) + return np.asarray(id2) + + +if __name__ == "__main__": + # I = clusterAnalysis([1.7, 1.6, 1.3, 1.3, 2.8, 1.4, 2.8, 2.6, 1.6, 2.7]) + # print(I) + + print(fermentationRate(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25)) + + + # print(removeIncomplete(np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]))) + + # Problem 1: Write a function which add two numbers + # clusterAnalysis([2, 1, 2, 4, 5]) diff --git a/examples/02631/students/programs/report1intro.py b/examples/02631/students/programs/report1intro.py new file mode 100644 index 0000000000000000000000000000000000000000..587129bf1557c8939dcabd21bd1ecd7658181416 --- /dev/null +++ b/examples/02631/students/programs/report1intro.py @@ -0,0 +1,142 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +from src.unitgrade2.unitgrade2 import Report, UTestCase, cache +from src.unitgrade2 import evaluate_report_student +import numpy as np +import looping +from looping import bacteriaGrowth, clusterAnalysis, removeIncomplete, fermentationRate + +def trlist(x): + s = str(list(x)) + if len(s) > 30: + s = s[:30] + "...]" + return s + +class Bacteria(UTestCase): + """ Bacteria growth rates """ + + def stest(self, n0, alpha, K, N): + g = bacteriaGrowth(n0=n0, alpha=alpha, K=K, N=N) + self.title = f"bacteriaGrowth({n0}, {alpha}, {K}, {N}) = {g} ?" + self.assertEqualC(g) + + def test_growth1(self): + """ Hints: + * Make sure to frobulate the frobulator. + """ + self.stest(100, 0.4, 1000, 500) + + def test_growth2(self): + self.stest(10, 0.4, 1000, 500) + + def test_growth3(self): + self.stest(100, 1.4, 1000, 500) + + def test_growth4(self): + self.stest(100, 0.0004, 1000, 500) + + def test_growth5(self): + """ + hints: + * What happens when n0 > N? (in this case return t=0) """ + self.stest(100, 0.4, 1000, 99) + +class ClusterAnalysis(UTestCase): + """ Test the cluster analysis method """ + + def stest(self, n, seed): + np.random.seed(seed) + x = np.round(np.random.rand(n), 1) + I = clusterAnalysis(x) + self.title = f"clusterAnalysis({list(x)}) = {list(I)} ?" + self.assertEqualC(list(I)) + + def test_cluster1(self): + """ Hints: + * Make sure to frobulate the frobulator. 
+ * Just try harder + """ + self.stest(3, 10) + + def test_cluster2(self): + self.stest(4, 146) + + def test_cluster3(self): + self.stest(5, 12) + + def test_cluster4(self): + """ + Cluster analysis for tied lists + Hints: + * It may be that an observations has the same distance to the two clusters. Where do you assign it in this case? + """ + x = np.array([10.0, 12.0, 10.0, 12.0, 9.0, 11.0, 11.0, 13.0]) + self.assertEqualC(list(clusterAnalysis(x) ) ) + + +class RemoveIncomplete(UTestCase): + """ Remove incomplete IDs """ + + def stest(self, x): + I = list( removeIncomplete(x) ) + self.title = f"removeId({trlist(x)}) = {trlist(I)} ?" + self.assertEqualC(I) + + @cache + def rseq(self, max, n): + np.random.seed(42) + return np.random.randint(max, size=(n,) ) + (np.random.randint(2, size=(n,) )+1)/10 + + def test_incomplete1(self): + self.stest( np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]) ) + + def test_incomplete2(self): + self.stest( np.array([1.1, 1.2, 1.3, 2.1, 2.2, 2.3]) ) + + def test_incomplete3(self): + self.stest(np.array([5.1, 5.2, 4.1, 4.3, 4.2, 8.1, 8.2, 8.3]) ) + + def test_incomplete4(self): + self.stest(np.array([1.1, 1.3, 2.1, 2.2, 3.1, 3.3, 4.1, 4.2, 4.3]) ) + + def test_incomplete5(self): + self.stest(self.rseq(10, 40)) + + +class FermentationRate(UTestCase): + """ Test the fermentation rate question """ + + def stest(self, x, lower, upper): + I = fermentationRate(x, lower, upper) + s = trlist(x) + self.title = f"fermentationRate({s}, {lower}, {upper}) = {I:.3f} ?" + self.assertEqualC(I) + + @cache + def rseq(self, max, n): + np.random.seed(42) + return np.random.randint(max, size=(n,) ) + (np.random.randint(3, size=(n,) )+1)/n + + def test_rate1(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25) + + def test_rate2(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 1, 200) + + def test_rate3(self): + self.stest(np.array([1.75]), 1, 2) + + def test_rate4(self): + self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 18.2, 20) + + +class Report1Flat(Report): + title = "Week 4: Looping" + questions = [(ClusterAnalysis, 10), (RemoveIncomplete, 10), (Bacteria, 10), (FermentationRate, 10),] + pack_imports = [looping] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Flat()) diff --git a/examples/02631/students/programs/report1intro_grade.py b/examples/02631/students/programs/report1intro_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ffde860cc05e001413731852fbef6394e34784 --- /dev/null +++ b/examples/02631/students/programs/report1intro_grade.py @@ -0,0 +1,339 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. 
To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + # nL = + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f"Question {n+1} total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = 
os.path.relpath(f, top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: 
print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# 
classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n 
self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n # nL =\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + 
", "+ plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport numpy as np\nimport looping\nfrom looping import bacteriaGrowth, clusterAnalysis, removeIncomplete, fermentationRate\n\ndef trlist(x):\n s = str(list(x))\n if len(s) > 30:\n s = s[:30] + "...]"\n return s\n\nclass Bacteria(UTestCase):\n """ Bacteria growth rates """\n\n def stest(self, n0, alpha, K, N):\n g = bacteriaGrowth(n0=n0, alpha=alpha, K=K, N=N)\n self.title = f"bacteriaGrowth({n0}, {alpha}, {K}, {N}) = {g} ?"\n self.assertEqualC(g)\n\n def test_growth1(self):\n 
""" Hints:\n * Make sure to frobulate the frobulator.\n """\n self.stest(100, 0.4, 1000, 500)\n\n def test_growth2(self):\n self.stest(10, 0.4, 1000, 500)\n\n def test_growth3(self):\n self.stest(100, 1.4, 1000, 500)\n\n def test_growth4(self):\n self.stest(100, 0.0004, 1000, 500)\n\n def test_growth5(self):\n """\n hints:\n * What happens when n0 > N? (in this case return t=0) """\n self.stest(100, 0.4, 1000, 99)\n\nclass ClusterAnalysis(UTestCase):\n """ Test the cluster analysis method """\n\n def stest(self, n, seed):\n np.random.seed(seed)\n x = np.round(np.random.rand(n), 1)\n I = clusterAnalysis(x)\n self.title = f"clusterAnalysis({list(x)}) = {list(I)} ?"\n self.assertEqualC(list(I))\n\n def test_cluster1(self):\n """ Hints:\n * Make sure to frobulate the frobulator.\n * Just try harder\n """\n self.stest(3, 10)\n\n def test_cluster2(self):\n self.stest(4, 146)\n\n def test_cluster3(self):\n self.stest(5, 12)\n\n def test_cluster4(self):\n """\n Cluster analysis for tied lists\n Hints:\n * It may be that an observations has the same distance to the two clusters. Where do you assign it in this case?\n """\n x = np.array([10.0, 12.0, 10.0, 12.0, 9.0, 11.0, 11.0, 13.0])\n self.assertEqualC(list(clusterAnalysis(x) ) )\n\n\nclass RemoveIncomplete(UTestCase):\n """ Remove incomplete IDs """\n\n def stest(self, x):\n I = list( removeIncomplete(x) )\n self.title = f"removeId({trlist(x)}) = {trlist(I)} ?"\n self.assertEqualC(I)\n\n @cache\n def rseq(self, max, n):\n np.random.seed(42)\n return np.random.randint(max, size=(n,) ) + (np.random.randint(2, size=(n,) )+1)/10\n\n def test_incomplete1(self):\n self.stest( np.array([1.3, 2.2, 2.3, 4.2, 5.1, 3.2, 5.3, 3.3, 2.1, 1.1, 5.2, 3.1]) )\n\n def test_incomplete2(self):\n self.stest( np.array([1.1, 1.2, 1.3, 2.1, 2.2, 2.3]) )\n\n def test_incomplete3(self):\n self.stest(np.array([5.1, 5.2, 4.1, 4.3, 4.2, 8.1, 8.2, 8.3]) )\n\n def test_incomplete4(self):\n self.stest(np.array([1.1, 1.3, 2.1, 2.2, 3.1, 3.3, 4.1, 4.2, 4.3]) )\n\n def test_incomplete5(self):\n self.stest(self.rseq(10, 40))\n\n\nclass FermentationRate(UTestCase):\n """ Test the fermentation rate question """\n\n def stest(self, x, lower, upper):\n I = fermentationRate(x, lower, upper)\n s = trlist(x)\n self.title = f"fermentationRate({s}, {lower}, {upper}) = {I:.3f} ?"\n self.assertEqualC(I)\n\n @cache\n def rseq(self, max, n):\n np.random.seed(42)\n return np.random.randint(max, size=(n,) ) + (np.random.randint(3, size=(n,) )+1)/n\n\n def test_rate1(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 15, 25)\n\n def test_rate2(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 1, 200)\n\n def test_rate3(self):\n self.stest(np.array([1.75]), 1, 2)\n\n def test_rate4(self):\n self.stest(np.array([20.1, 19.3, 1.1, 18.2, 19.7, 121.1, 20.3, 20.0]), 18.2, 20)\n\n\nclass Report1Flat(Report):\n title = "Week 4: Looping"\n questions = [(ClusterAnalysis, 10), (RemoveIncomplete, 10), (Bacteria, 10), (FermentationRate, 10),]\n pack_imports = [looping]' +report1_payload = 
'80049592150000000000007d94288c0f436c7573746572416e616c79736973947d94288c0f436c7573746572416e616c79736973948c0d746573745f636c7573746572319486948c057469746c659486948c2e636c7573746572416e616c79736973285b302e382c20302e302c20302e365d29203d205b312c20322c20315d203f946803680486948c066173736572749486947d944b005d94288c156e756d70792e636f72652e6d756c74696172726179948c067363616c61729493948c056e756d7079948c0564747970659493948c02693494898887945294284b038c013c944e4e4e4affffffff4affffffff4b007494624304010000009486945294681068164304020000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657232948694680686948c36636c7573746572416e616c79736973285b302e352c20302e362c20302e332c20302e335d29203d205b322c20322c20312c20315d203f94680368228694680a86947d944b005d9428681068164304020000009486945294681068164304020000009486945294681068164304010000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657233948694680686948c3e636c7573746572416e616c79736973285b302e322c20302e372c20302e332c20302e352c20302e305d29203d205b312c20322c20312c20322c20315d203f94680368368694680a86947d944b005d9428681068164304010000009486945294681068164304020000009486945294681068164304010000009486945294681068164304020000009486945294681068164304010000009486945294657368038c0d746573745f636c757374657234948694680a86947d944b005d942868106816430401000000948694529468106816430402000000948694529468106816430401000000948694529468106816430402000000948694529468106816430401000000948694529468106816430401000000948694529468106816430401000000948694529468106816430402000000948694529465738c0474696d6594473fdf6c8500000000758c1052656d6f7665496e636f6d706c657465947d94288c1052656d6f7665496e636f6d706c657465948c10746573745f696e636f6d706c657465319486948c057469746c659486948c5372656d6f76654964285b312e332c20322e322c20322e332c20342e322c20352e312c20332e322c2e2e2e5d29203d205b322e322c20322e332c20352e312c20332e322c20352e332c20332e332c2e2e2e5d203f94686d686e86948c066173736572749486947d944b005d9428681068138c02663894898887945294284b0368174e4e4e4affffffff4affffffff4b0074946243089a9999999999014094869452946810687a4308666666666666024094869452946810687a4308666666666666144094869452946810687a43089a9999999999094094869452946810687a4308333333333333154094869452946810687a43086666666666660a4094869452946810687a4308cdcccccccccc004094869452946810687a4308cdcccccccccc144094869452946810687a4308cdcccccccccc084094869452946573686d8c10746573745f696e636f6d706c65746532948694687086948c4b72656d6f76654964285b312e312c20312e322c20312e332c20322e312c20322e322c20322e335d29203d205b312e312c20312e322c20312e332c20322e312c20322e322c20322e335d203f94686d68978694687486947d944b005d94286810687a43089a9999999999f13f94869452946810687a4308333333333333f33f94869452946810687a4308cdccccccccccf43f94869452946810687a4308cdcccccccccc004094869452946810687a43089a9999999999014094869452946810687a4308666666666666024094869452946573686d8c10746573745f696e636f6d706c65746533948694687086948c4f72656d6f76654964285b352e312c20352e322c20342e312c20342e332c20342e322c20382e312c2e2e2e5d29203d205b342e312c20342e332c20342e322c20382e312c20382e322c20382e335d203f94686d68b18694687486947d944b005d94286810687a4308666666666666104094869452946810687a4308333333333333114094869452946810687a4308cdcccccccccc104094869452946810687a4308333333333333204094869452946810687a4308666666666666204094869452946810687a43089a9999999999204094869452946573686d8c10746573745f696e636f6d706c65746534948694687086948c4072656d6f76654964285b312e312c20312e332c20322e312c20322e322c20332e312c20332e332c2e2e2e5d29203d205b342e312c20342e322c20342e335d203f94686d68cb8694687486947d944b005d
94286810687a4308666666666666104094869452946810687a4308cdcccccccccc104094869452946810687a4308333333333333114094869452946573686d8c10746573745f696e636f6d706c657465359486948c06406361636865948c0472736571948c0966756e63746f6f6c73948c0a5f486173686564536571949394298194284b0a4b28654e7d948c096861736876616c7565948a0884d8ef03874d7f467386946287948694680e8c0c5f7265636f6e73747275637494939468118c076e6461727261799493944b0085944301629487945294284b014b28859468138c02663894898887945294284b0368174e4e4e4affffffff4affffffff4b0074946289424001000066666666666618409a99999999990940cdcccccccccc1c40cdcccccccccc1040cdcccccccccc184033333333333322409a999999999901406666666666661840cdcccccccccc1c40cdcccccccccc10409a999999999909406666666666661c40cdcccccccccc1c40cdcccccccccc0040cdcccccccccc14406666666666661040333333333333f33f6666666666661c406666666666661440333333333333f33f66666666666610409a9999999999c93f6666666666662240cdcccccccccc144066666666666620409a9999999999c93f66666666666622409a99999999990140cdcccccccccc18409a9999999999094066666666666620409a999999999901406666666666661040cdcccccccccc0040cdcccccccccc1840cdcccccccccc10406666666666662040cdcccccccccc1840333333333333f33f9a9999999999094094749462686d68dc8694687086948c5372656d6f76654964285b362e312c20332e322c20372e322c20342e322c20362e322c20392e312c2e2e2e5d29203d205b392e312c20352e322c20312e322c20352e312c20312e322c20392e322c2e2e2e5d203f94686d68dc8694687486947d944b005d94286810687a4308333333333333224094869452946810687a4308cdcccccccccc144094869452946810687a4308333333333333f33f94869452946810687a4308666666666666144094869452946810687a4308333333333333f33f94869452946810687a4308666666666666224094869452946810687a4308cdcccccccccc144094869452946810687a4308666666666666204094869452946810687a4308666666666666224094869452946810687a4308666666666666204094869452946810687a4308666666666666204094869452946810687a4308333333333333f33f94869452946573686a473fcf9dc400000000758c084261637465726961947d94288c084261637465726961948c0c746573745f67726f777468319486948c057469746c659486948c29626163746572696147726f777468283130302c20302e342c20313030302c2035303029203d2037203f946a250100006a2601000086948c066173736572749486947d944b004b07736a250100006a2601000086948c08636f7665726167659486947d94286a250100006a2601000086947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468329486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468339486947d948c0a6c6f6f70696e672e7079947d948c256465662062616374657269614
7726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468349486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b158ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a2020202022222294869473736a250100008c0c746573745f67726f777468359486947d948c0a6c6f6f70696e672e7079947d948c2564656620626163746572696147726f777468286e302c20616c7068612c204b2c204e293a20944b118ce72222220a2020202043616c63756c6174652074696d6520756e74696c2062616374657269612067726f77746820657863656564204e207374617274696e672066726f6d206120706f70756c6174696f6e206f66206e302062616374657269612e0a2020202068696e74733a0a20202020202020202a20636f6e7369646572206e300a20202020202020202a20616c706861203e20300a202020203a706172616d206e303a0a202020203a706172616d20616c7068613a0a202020203a706172616d204b3a0a202020203a706172616d204e3a0a202020203a72657475726e3a0a202020202222229486947373756a250100006a3a01000086946a2801000086948c29626163746572696147726f7774682831302c20302e342c20313030302c2035303029203d203134203f946a250100006a3a01000086946a2c01000086947d944b004b0e736a250100006a3a01000086946a3001000086946a320100006a250100006a4201000086946a2801000086948c29626163746572696147726f777468283130302c20312e342c20313030302c2035303029203d2033203f946a250100006a4201000086946a2c01000086947d944b004b03736a250100006a4201000086946a3001000086946a320100006a250100006a4a01000086946a2801000086948c2f626163746572696147726f777468283130302c20302e303030342c20313030302c2035303029203d2035343934203f946a250100006a4a01000086946a2c01000086947d944b004d7615736a250100006a4a01000086946a3001000086946a320100006a250100006a5201000086946a2801000086948c28626163746572696147726f777468283130302c20302e342c20313030302c20393929203d2030203f946a250100006a5201000086946a2c01000086947d944b004b00736a250100006a5201000086946a3001000086946a32010000686a473fcf9d9a00000000758c104665726d656e746174696f6e52617465947d94288c104665726d656e746174696f6e52617465948c0a746573745f72617465319486948c057469746c659486948c476665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c2031352c20323529203d2031392e363030203f946a7c0100006a7d01000086948c066173736572749486947d944b006810687a43089a999999999933409486945294736a7c0100008c0a746573745f72617465329486946a7f01000086948c476665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c20312c2032303029203d2032392e393735203f946a7c0100006a8901000086946a8301000086947d944b006810687a43089899999999f93d409486945294736a7c0100008c0a746573745f72617465339486946a7f01000086948c286665726d656e746174696f6e52617465285b312e37355d2c20312c203229203d20312e373530203f946a7c
0100006a9301000086946a8301000086947d944b006810687a4308000000000000fc3f9486945294736a7c0100008c0a746573745f72617465349486946a7f01000086948c496665726d656e746174696f6e52617465285b32302e312c2031392e332c20312e312c2031382e322c2031392e372c202e2e2e5d2c2031382e322c20323029203d2031392e353030203f946a7c0100006a9d01000086946a8301000086947d944b006810687a43080000000000803340948694529473686a473fc74c0a0000000075752e' +name="Report1Flat" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) diff --git a/examples/02631/students/programs/unitgrade/Bacteria.pkl b/examples/02631/students/programs/unitgrade/Bacteria.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b246df45c63387d95bc69cc26deba7770d8249c5 Binary files /dev/null and b/examples/02631/students/programs/unitgrade/Bacteria.pkl differ diff --git a/examples/02631/students/programs/unitgrade/ClusterAnalysis.pkl b/examples/02631/students/programs/unitgrade/ClusterAnalysis.pkl new file mode 100644 index 0000000000000000000000000000000000000000..36635371f6e86d5f841a3827696a5909e766532b Binary files /dev/null and b/examples/02631/students/programs/unitgrade/ClusterAnalysis.pkl differ diff --git a/examples/02631/students/programs/unitgrade/FermentationRate.pkl b/examples/02631/students/programs/unitgrade/FermentationRate.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9f5cea13c96d67ff2704de398cd66b78a2db463b Binary files /dev/null and b/examples/02631/students/programs/unitgrade/FermentationRate.pkl differ diff --git a/examples/02631/students/programs/unitgrade/RemoveIncomplete.pkl b/examples/02631/students/programs/unitgrade/RemoveIncomplete.pkl new file mode 100644 index 0000000000000000000000000000000000000000..30edf2db7bc0e41f0ada9c1c7443ac1d487a6d9a Binary files /dev/null and b/examples/02631/students/programs/unitgrade/RemoveIncomplete.pkl differ diff --git a/examples/autolab_example/autolab_example.py b/examples/autolab_example/autolab_example.py index 6cdf58ac889a8c71b98bcbeb4788d299391ecf7f..a4bdf6256e612b3840653c68fef9c7217b50383d 100644 --- a/examples/autolab_example/autolab_example.py +++ b/examples/autolab_example/autolab_example.py @@ -1,9 +1,9 @@ import os -from autolab.autolab import deploy_assignment +from unitgrade_private2.autolab.autolab import deploy_assignment if __name__ == "__main__": wdir = os.getcwd() - args = [('example_simplest', 'cs101', 'report1_grade.py', 'report1_grade.py'), + args = [('example_simplest', 'programs', 'report1_grade.py', 'report1_grade.py'), ('example_framework', 'cs102', 'report2_grade.py', 'report2_grade.py'), ('example_docker', 'cs103', 'report3_complete_grade.py', 'report3_grade.py'), ] diff --git a/examples/autolab_example/tmp/cs101/cs101.yml b/examples/autolab_example/tmp/cs101/cs101.yml index 7631a7fbbb0ccf4d294b447404186abdc40fa70e..6dc13d8fbe51c544cc70389b421e3b9415c0b7d4 100644 --- a/examples/autolab_example/tmp/cs101/cs101.yml +++ b/examples/autolab_example/tmp/cs101/cs101.yml @@ -1,14 +1,14 @@ --- general: - name: cs101 + name: programs description: '' display_name: CS 101 Report 1 handin_filename: Report1_handin.token handin_directory: handin max_grace_days: 0 - handout: cs101-handout.tar - writeup: writeup/cs101.html + handout: programs-handout.tar + writeup: writeup/programs.html max_submissions: -1 disable_handins: false max_size: 2 diff --git a/examples/autolab_example/tmp/cs101/src/driver_python.py 
b/examples/autolab_example/tmp/cs101/src/driver_python.py index 9b3e081e764bac25f8d6acecad887fec8f971270..074cc75254946aafa89ac3b404e637151e0e2b79 100644 --- a/examples/autolab_example/tmp/cs101/src/driver_python.py +++ b/examples/autolab_example/tmp/cs101/src/driver_python.py @@ -25,7 +25,7 @@ def pfiles(): student_token_file = 'Report1_handin.token' instructor_grade_script = 'report1_grade.py' -grade_file_relative_destination = "cs101\report1_grade.py" +grade_file_relative_destination = "programs\report1_grade.py" with open(student_token_file, 'rb') as f: results = pickle.load(f) sources = results['sources'][0] @@ -55,8 +55,8 @@ def rcom(cm): start = time.time() rcom(command) # pfiles() -# for f in glob.glob(host_tmp_dir + "/cs101/*"): -# print("cs101/", f) +# for f in glob.glob(host_tmp_dir + "/programs/*"): +# print("programs/", f) # print("---") ls = glob.glob(token) # print(ls) diff --git a/examples/autolab_example/tmp/cs101/src/report1_grade.py b/examples/autolab_example/tmp/cs101/src/report1_grade.py index 8972ab5fd7d427147f65315d2b2b87f6dee0f6fb..fbbeabf6e2ea7f3ac581dc025d75ad7eb8024a2f 100644 --- a/examples/autolab_example/tmp/cs101/src/report1_grade.py +++ b/examples/autolab_example/tmp/cs101/src/report1_grade.py @@ -453,7 +453,7 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > 
tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n 
super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n # print("Bad output\\n\\n")\n\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n 
dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = 
self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. 
By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if 
os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, 
\'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass 
UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom programs.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n # print("Bad output\\n\\n")\n\n\nimport programs\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [programs]' report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' 
name="Report1" diff --git a/examples/autolab_example/tmp/cs102/src/driver_python.py b/examples/autolab_example/tmp/cs102/src/driver_python.py index 092842afd627dc96a2f2f3ce12ee158b4c47b9ff..80da44ebac70ee9fe2aaddd943d9bebd63695722 100644 --- a/examples/autolab_example/tmp/cs102/src/driver_python.py +++ b/examples/autolab_example/tmp/cs102/src/driver_python.py @@ -55,8 +55,8 @@ def rcom(cm): start = time.time() rcom(command) # pfiles() -# for f in glob.glob(host_tmp_dir + "/cs101/*"): -# print("cs101/", f) +# for f in glob.glob(host_tmp_dir + "/programs/*"): +# print("programs/", f) # print("---") ls = glob.glob(token) # print(ls) diff --git a/examples/autolab_example/tmp/cs103/src/driver_python.py b/examples/autolab_example/tmp/cs103/src/driver_python.py index ed9bd8bd0b965021e1261cb9ff9f20c02b503594..34e6b0b80fa9195f99623bee5916624bbc30b57d 100644 --- a/examples/autolab_example/tmp/cs103/src/driver_python.py +++ b/examples/autolab_example/tmp/cs103/src/driver_python.py @@ -55,8 +55,8 @@ def rcom(cm): start = time.time() rcom(command) # pfiles() -# for f in glob.glob(host_tmp_dir + "/cs101/*"): -# print("cs101/", f) +# for f in glob.glob(host_tmp_dir + "/programs/*"): +# print("programs/", f) # print("---") ls = glob.glob(token) # print(ls) diff --git a/examples/example_docker/instructor/cs103/.coverage b/examples/example_docker/instructor/cs103/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..f386b2198168450113494de5b3b3bd99d653d108 Binary files /dev/null and b/examples/example_docker/instructor/cs103/.coverage differ diff --git a/examples/example_docker/instructor/cs103/Report3_handin_25_of_30.token b/examples/example_docker/instructor/cs103/Report3_handin_25_of_30.token new file mode 100644 index 0000000000000000000000000000000000000000..af7c703d2692f8bf15644581b9cdf6688b4b9c3b Binary files /dev/null and b/examples/example_docker/instructor/cs103/Report3_handin_25_of_30.token differ diff --git a/examples/example_docker/instructor/cs103/Report3_handin_30_of_30.token b/examples/example_docker/instructor/cs103/Report3_handin_30_of_30.token new file mode 100644 index 0000000000000000000000000000000000000000..0ddcac53870efc12081817272c4b694c4c34fe9e Binary files /dev/null and b/examples/example_docker/instructor/cs103/Report3_handin_30_of_30.token differ diff --git a/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc index 4beaff64aef655b294b4513aa4842f7eada9c024..c2ad43883cce209ac9cd30ec4db9f197297be5f1 100644 Binary files a/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc and b/examples/example_docker/instructor/cs103/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc index da19a02cc1c508d3eaafcf35b0b34e9d58501d7e..78157d3ccd77777a3e6bdc5d8ef57fa286b7b497 100644 Binary files a/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc and b/examples/example_docker/instructor/cs103/__pycache__/report3.cpython-38.pyc differ diff --git a/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc index 921af5c7208e7aa4d953fa1e4551029aeb05c182..ca3fa7b43ad2a579965093501504022367ac79d6 100644 Binary files 
a/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc and b/examples/example_docker/instructor/cs103/__pycache__/report3_complete.cpython-38.pyc differ diff --git a/examples/example_docker/instructor/cs103/deploy.py b/examples/example_docker/instructor/cs103/deploy.py index 9379f590784446c87e4008fd2d59bdc59660c26a..e1350f9cca5da84a9329f232008b6cc6d2820f27 100644 --- a/examples/example_docker/instructor/cs103/deploy.py +++ b/examples/example_docker/instructor/cs103/deploy.py @@ -4,25 +4,24 @@ from unitgrade_private2.hidden_create_files import setup_grade_file_report from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet from unitgrade_private2.deployment import remove_hidden_methods from unitgrade_private2.docker_helpers import docker_run_token_file -import shutil import os import glob import pickle from snipper.snip_dir import snip_dir +wd = os.path.dirname(__file__) def deploy_student_files(): setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False) - # Report3.reset() fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py") setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False) - sdir = "../../students/cs103" - snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py']) + sdir = wd+"/../../students/cs103" + snip_dir(source_dir=wd+"/../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py']) return sdir def run_student_code_on_docker(Dockerfile, student_token_file): token = docker_run_token_file(Dockerfile_location=Dockerfile, - host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp", + host_tmp_dir=os.path.dirname(Dockerfile) + "/home", student_token_file=student_token_file, instructor_grade_script="report3_complete_grade.py") with open(token, 'rb') as f: @@ -32,17 +31,14 @@ def run_student_code_on_docker(Dockerfile, student_token_file): if __name__ == "__main__": # Step 1: Deploy the students files and return the directory they were written to student_directory = deploy_student_files() - # import sys - # sys.exit() - # student_directory = "../../students/cs103" + # Step 2: Simulate that the student run their report script and generate a .token file. os.system("cd ../../students && python -m cs103.report3_grade") student_token_file = glob.glob(student_directory + "/*.token")[0] - # Step 3: Compile the Docker image (obviously you will only do this once; add your packages to requirements.txt). - Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile" - os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .") + Dockerfile = os.path.dirname(__file__) + "/../../../../docker_images/unitgrade-docker/Dockerfile" + os.system(f"cd {os.path.dirname(Dockerfile)} && docker build --tag unitgrade-docker .") # Step 4: Test the students .token file and get the results-token-file. 
Compare the contents with the students_token_file: checked_token = run_student_code_on_docker(Dockerfile, student_token_file) diff --git a/examples/example_docker/instructor/cs103/report3.py b/examples/example_docker/instructor/cs103/report3.py index 6dfbe04f7107436eb376dbd03206f30f44472287..3bdc6e6b4740095b2bbdab5b924b3f31fc9c4db3 100644 --- a/examples/example_docker/instructor/cs103/report3.py +++ b/examples/example_docker/instructor/cs103/report3.py @@ -1,5 +1,5 @@ -from unitgrade2.unitgrade2 import UTestCase, Report, hide -from unitgrade2.unitgrade_helpers2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import UTestCase, Report +from src.unitgrade2 import evaluate_report_student class Week1(UTestCase): """ The first question for week 1. """ @@ -21,4 +21,6 @@ class Report3(Report): pack_imports = [cs103] if __name__ == "__main__": + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet(Report3()) evaluate_report_student(Report3()) \ No newline at end of file diff --git a/examples/example_docker/instructor/cs103/report3_complete.py b/examples/example_docker/instructor/cs103/report3_complete.py index 4e72f820656948ed33cb485d3c940c6e4f1fd85a..dd85bd86d96d51dcd973995af8bb98788c9fca42 100644 --- a/examples/example_docker/instructor/cs103/report3_complete.py +++ b/examples/example_docker/instructor/cs103/report3_complete.py @@ -1,5 +1,5 @@ from unitgrade2.unitgrade2 import UTestCase, Report, hide -from unitgrade2.unitgrade_helpers2 import evaluate_report_student +from unitgrade2 import evaluate_report_student class Week1(UTestCase): """ The first question for week 1. """ @@ -30,4 +30,6 @@ class Report3(Report): pack_imports = [cs103] if __name__ == "__main__": + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet(Report3()) evaluate_report_student(Report3()) diff --git a/examples/example_docker/instructor/cs103/report3_complete_grade.py b/examples/example_docker/instructor/cs103/report3_complete_grade.py index b053e48dd7d2feeb7bc45cebec0c175ec41590a6..8ea5f2e975d3a8e8f50c6047888728d222e800b4 100644 --- a/examples/example_docker/instructor/cs103/report3_complete_grade.py +++ b/examples/example_docker/instructor/cs103/report3_complete_grade.py @@ -4,15 +4,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest -# from unitgrade2.unitgrade2 import MySuite - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. 
-import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -113,24 +108,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -140,11 +131,10 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) @@ -153,20 +143,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f" * q{n+1}) Total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -181,15 +167,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -212,7 +199,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -233,7 +221,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -277,14 +265,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -297,12 +285,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -315,15 +306,17 @@ def gather_upload_to_campusnet(report, output_dir=None): vstring = "_v"+report.version if report.version is not None else "" token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) + token = os.path.normpath(os.path.join(output_dir, token)) + + with open(token, 'wb') as f: pickle.dump(results, f) if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -336,8 +329,8 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = 
os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = 
self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from 
test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = 
os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, 
descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. """\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n @hide\n def test_add_hidden(self):\n # This is a hidden test. 
The @hide-decorator will allow unitgrade to remove the test.\n # See the output in the student directory for more information.\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n @hide\n def test_hidden_fail(self):\n self.assertEqual(2,3)\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' -report1_payload = '80049586000000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04756803680486948c0474696d659486944700000000000000008c0474696d6594473f60628000000000758c0d4175746f6d6174696350617373947d94680c473f689d000000000073752e' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return 
root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n 
q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. """\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n @hide\n def test_add_hidden(self):\n # This is a hidden test. 
The @hide-decorator will allow unitgrade to remove the test.\n # See the output in the student directory for more information.\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n @hide\n def test_hidden_fail(self):\n self.assertEqual(2,3)\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049589000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b047568018c0f746573745f6164645f68696464656e948694680586947d944b004b04738c0474696d6594473fe3b8a400000000758c0d4175746f6d6174696350617373947d94680c473fc45a520000000073752e' name="Report3" report = source_instantiate(name, report1_source, report1_payload) diff --git a/examples/example_docker/instructor/cs103/report3_grade.py b/examples/example_docker/instructor/cs103/report3_grade.py index 06bc99f052747054bba09354cd0d4c8fa5e230c4..3b6b512cd4891d38f167f60e8713f8f0fb9154fa 100644 --- a/examples/example_docker/instructor/cs103/report3_grade.py +++ b/examples/example_docker/instructor/cs103/report3_grade.py @@ -4,15 +4,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest -# from unitgrade2.unitgrade2 import MySuite - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -113,24 +108,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -140,11 +131,10 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. 
UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) @@ -153,20 +143,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f" * q{n+1}) Total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -181,15 +167,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -212,7 +199,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -233,7 +221,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -277,14 +265,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... 
sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -297,12 +285,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -315,15 +306,17 @@ def gather_upload_to_campusnet(report, output_dir=None): vstring = "_v"+report.version if report.version is not None else "" token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) + token = os.path.normpath(os.path.join(output_dir, token)) + + with open(token, 'wb') as f: pickle.dump(results, f) if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -336,8 +329,8 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n 
print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, 
descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' -report1_payload = '80049525010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04756803680486948c0474696d65948694473f506a000000000068038c0f746573745f6164645f68696464656e948694680686947d944b004b04736803680c8694680a86944700000000000000008c0474696d6594473f926de000000000758c0d4175746f6d6174696350617373947d94288c0d4175746f6d6174696350617373948c10746573745f68696464656e5f6661696c9486948c066173736572749486947d9468158c13746573745f73747564656e745f706173736564948694681886947d946815681b86948c0474696d659486944700000000000000006812473f9894100000000075752e' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return 
root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n 
q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049568000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04758c0474696d6594473fb71ac800000000758c0d4175746f6d6174696350617373947d946808473fb127100000000073752e' name="Report3" report = source_instantiate(name, report1_source, report1_payload) diff --git a/examples/example_docker/instructor/cs103/unitgrade/AutomaticPass.pkl b/examples/example_docker/instructor/cs103/unitgrade/AutomaticPass.pkl index 2a722e2b9c8264b76eca73fec2c7dd84eb0e02d3..9b6ff7ac689837f86e1b0e393993ec7acbb784e8 100644 Binary files a/examples/example_docker/instructor/cs103/unitgrade/AutomaticPass.pkl and b/examples/example_docker/instructor/cs103/unitgrade/AutomaticPass.pkl differ diff --git a/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl b/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl index fe27b785553c86fe6975853b9990eed439d2d5bc..20eb565b4b7903e4aef2d3d44e08726a2b0e14ed 100644 Binary files a/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl and b/examples/example_docker/instructor/cs103/unitgrade/Week1.pkl differ diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_5_of_30.token b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_5_of_30.token deleted file mode 100644 index 675c59014e1063147604cc9ab25520eb3d1bbb5e..0000000000000000000000000000000000000000 Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/Report3_handin_5_of_30.token and /dev/null differ diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py deleted file mode 100644 index b053e48dd7d2feeb7bc45cebec0c175ec41590a6..0000000000000000000000000000000000000000 --- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_complete_grade.py +++ /dev/null @@ -1,345 +0,0 @@ - -import numpy as np -from tabulate import tabulate -from datetime import datetime -import pyfiglet -import unittest -# from unitgrade2.unitgrade2 import MySuite - -import inspect -import os -import argparse -import sys -import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue - -parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: -To run all tests in a report: - -> python assignment1_dp.py - -To run only question 2 or question 2.1 - -> python assignment1_dp.py -q 2 -> python assignment1_dp.py -q 2.1 - -Note this scripts does not grade your report. To grade your report, use: - -> python report1_grade.py - -Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. 
-For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: - -> python -m course_package.report1 - -see https://docs.python.org/3.9/using/cmdline.html -""", formatter_class=argparse.RawTextHelpFormatter) -parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') -parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') -parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') -parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') -parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') - -def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): - args = parser.parse_args() - if question is None and args.q is not None: - question = args.q - if "." in question: - question, qitem = [int(v) for v in question.split(".")] - else: - question = int(question) - - if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: - raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") - - if unmute is None: - unmute = args.unmute - if passall is None: - passall = args.passall - - results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, - show_tol_err=show_tol_err) - - - if question is None: - print("Provisional evaluation") - tabulate(table_data) - table = table_data - print(tabulate(table)) - print(" ") - - fr = inspect.getouterframes(inspect.currentframe())[1].filename - gfile = os.path.basename(fr)[:-3] + "_grade.py" - if os.path.exists(gfile): - print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") - print(">>>", gfile) - print("In the same manner as you ran this file.") - - - return results - - -def upack(q): - # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) - h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] - h = np.asarray(h) - return h[:,0], h[:,1], h[:,2], - -class UnitgradeTextRunner(unittest.TextTestRunner): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - -class SequentialTestLoader(unittest.TestLoader): - def getTestCaseNames(self, testCaseClass): - test_names = super().getTestCaseNames(testCaseClass) - # testcase_methods = list(testCaseClass.__dict__.keys()) - ls = [] - for C in testCaseClass.mro(): - if issubclass(C, unittest.TestCase): - ls = list(C.__dict__.keys()) + ls - testcase_methods = ls - test_names.sort(key=testcase_methods.index) - return test_names - -def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, - show_progress_bar=True, - show_tol_err=False, - big_header=True): - - now = datetime.now() - if big_header: - ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") - b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) - else: - b = "Unitgrade" - print(b + " v" + __version__) - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) - s = report.title - if hasattr(report, "version") and report.version is not None: - s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") - # print(f"Loaded answers from: ", report.computed_answers_file, "\n") - table_data = [] - nL = 80 - t_start = time.time() - score = {} - loader = SequentialTestLoader() - - for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) - if question is not None and n+1 != question: - continue - suite = loader.loadTestsFromTestCase(q) - qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ - q_title_print = "Question %i: %s"%(n+1, qtitle) - print(q_title_print, end="") - q.possible = 0 - q.obtained = 0 - q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] - UTextResult.q_title_print = q_title_print # Hacky - UTextResult.show_progress_bar = show_progress_bar # Hacky. 
- UTextResult.number = n - - res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - - possible = res.testsRun - obtained = len(res.successes) - - assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 - score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} - q.obtained = obtained - q.possible = possible - - s1 = f"*** Question q{n+1}" - s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) - print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) - - ws, possible, obtained = upack(score) - possible = int( msum(possible) ) - obtained = int( msum(obtained) ) # Cast to python int - report.possible = possible - report.obtained = obtained - now = datetime.now() - dt_string = now.strftime("%H:%M:%S") - - dt = int(time.time()-t_start) - minutes = dt//60 - seconds = dt - minutes*60 - plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") - - table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) - results = {'total': (obtained, possible), 'details': score} - return results, table_data - - - - -from tabulate import tabulate -from datetime import datetime -import inspect -import json -import os -import bz2 -import pickle -import os - -def bzwrite(json_str, token): # to get around obfuscation issues - with getattr(bz2, 'open')(token, "wt") as f: - f.write(json_str) - -def gather_imports(imp): - resources = {} - m = imp - # for m in pack_imports: - # print(f"*** {m.__name__}") - f = m.__file__ - # dn = os.path.dirname(f) - # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) - # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: - top_package = os.path.dirname(m.__file__) - module_import = True - else: - top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] - module_import = False - - # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) - # top_package = os.path.dirname(top_package) - import zipfile - # import strea - # zipfile.ZipFile - import io - # file_like_object = io.BytesIO(my_zip_data) - zip_buffer = io.BytesIO() - with zipfile.ZipFile(zip_buffer, 'w') as zip: - # zip.write() - for root, dirs, files in os.walk(top_package): - for file in files: - if file.endswith(".py"): - fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) - zip.write(fpath, v) - - resources['zipfile'] = zip_buffer.getvalue() - resources['top_package'] = top_package - resources['module_import'] = module_import - return resources, top_package - - if f.endswith("__init__.py"): - for root, dirs, files in os.walk(os.path.dirname(f)): - for file in files: - if file.endswith(".py"): - # print(file) - # print() - v = os.path.relpath(os.path.join(root, file), top_package) - with open(os.path.join(root, file), 'r') as ff: - resources[v] = ff.read() - else: - v = os.path.relpath(f, top_package) - with open(f, 'r') as ff: - resources[v] = ff.read() - return resources - -import argparse -parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this 
script to get the score of your report. Example: - -> python report1_grade.py - -Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. -For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: - -> python -m course_package.report1 - -see https://docs.python.org/3.9/using/cmdline.html -""", formatter_class=argparse.RawTextHelpFormatter) -parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') -parser.add_argument('--autolab', action="store_true", help='Show Autolab results') - -def gather_upload_to_campusnet(report, output_dir=None): - n = report.nL - args = parser.parse_args() - results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, - show_progress_bar=not args.noprogress, - big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) - # also load the source code of missing files... - - sources = {} - - if not args.autolab: - if len(report.individual_imports) > 0: - print("By uploading the .token file, you verify the files:") - for m in report.individual_imports: - print(">", m.__file__) - print("Are created/modified individually by you in agreement with DTUs exam rules") - report.pack_imports += report.individual_imports - - if len(report.pack_imports) > 0: - print("Including files in upload...") - for k, m in enumerate(report.pack_imports): - nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) - nimp['report_relative_location'] = report_relative_location - nimp['name'] = m.__name__ - sources[k] = nimp - # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") - # sources = {**sources, **nimp} - results['sources'] = sources - - if output_dir is None: - output_dir = os.getcwd() - - payload_out_base = report.__class__.__name__ + "_handin" - - obtain, possible = results['total'] - vstring = "_v"+report.version if report.version is not None else "" - - token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) - with open(token, 'wb') as f: - pickle.dump(results, f) - - if not args.autolab: - print(" ") - print("To get credit for your results, please upload the single file: ") - print(">", token) - print("To campusnet without any modifications.") - - # print("Now time for some autolab fun") - -def source_instantiate(name, report1_source, payload): - eval("exec")(report1_source, globals()) - pl = pickle.loads(bytes.fromhex(payload)) - report = eval(name)(payload=pl, strict=True) - # report.set_payload(pl) - return report - - - -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you 
import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( 
(np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) 
as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # 
self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, 
descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. """\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n @hide\n def test_add_hidden(self):\n # This is a hidden test. 
The @hide-decorator will allow unitgrade to remove the test.\n # See the output in the student directory for more information.\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n @hide\n def test_hidden_fail(self):\n self.assertEqual(2,3)\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' -report1_payload = '80049586000000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04756803680486948c0474696d659486944700000000000000008c0474696d6594473f60628000000000758c0d4175746f6d6174696350617373947d94680c473f689d000000000073752e' -name="Report3" - -report = source_instantiate(name, report1_source, report1_payload) -output_dir = os.path.dirname(__file__) -gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py b/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py deleted file mode 100644 index ecff9f7ce0634562a106d021cda68177c65c12fb..0000000000000000000000000000000000000000 --- a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/report3_grade.py +++ /dev/null @@ -1,347 +0,0 @@ -""" -Example student code. This file is automatically generated from the files in the instructor-directory -""" -import numpy as np -from tabulate import tabulate -from datetime import datetime -import pyfiglet -import unittest -# from unitgrade2.unitgrade2 import MySuite - -import inspect -import os -import argparse -import sys -import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue - -parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: -To run all tests in a report: - -> python assignment1_dp.py - -To run only question 2 or question 2.1 - -> python assignment1_dp.py -q 2 -> python assignment1_dp.py -q 2.1 - -Note this scripts does not grade your report. To grade your report, use: - -> python report1_grade.py - -Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. -For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: - -> python -m course_package.report1 - -see https://docs.python.org/3.9/using/cmdline.html -""", formatter_class=argparse.RawTextHelpFormatter) -parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') -parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') -parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') -parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') -parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. 
Useful when debugging.') - -def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): - args = parser.parse_args() - if question is None and args.q is not None: - question = args.q - if "." in question: - question, qitem = [int(v) for v in question.split(".")] - else: - question = int(question) - - if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: - raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") - - if unmute is None: - unmute = args.unmute - if passall is None: - passall = args.passall - - results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, - show_tol_err=show_tol_err) - - - if question is None: - print("Provisional evaluation") - tabulate(table_data) - table = table_data - print(tabulate(table)) - print(" ") - - fr = inspect.getouterframes(inspect.currentframe())[1].filename - gfile = os.path.basename(fr)[:-3] + "_grade.py" - if os.path.exists(gfile): - print("Note your results have not yet been registered. \nTo register your results, please run the file:") - print(">>>", gfile) - print("In the same manner as you ran this file.") - - - return results - - -def upack(q): - # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) - h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] - h = np.asarray(h) - return h[:,0], h[:,1], h[:,2], - -class UnitgradeTextRunner(unittest.TextTestRunner): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - -class SequentialTestLoader(unittest.TestLoader): - def getTestCaseNames(self, testCaseClass): - test_names = super().getTestCaseNames(testCaseClass) - # testcase_methods = list(testCaseClass.__dict__.keys()) - ls = [] - for C in testCaseClass.mro(): - if issubclass(C, unittest.TestCase): - ls = list(C.__dict__.keys()) + ls - testcase_methods = ls - test_names.sort(key=testcase_methods.index) - return test_names - -def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, - show_progress_bar=True, - show_tol_err=False, - big_header=True): - - now = datetime.now() - if big_header: - ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") - b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) - else: - b = "Unitgrade" - print(b + " v" + __version__) - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) - s = report.title - if hasattr(report, "version") and report.version is not None: - s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") - # print(f"Loaded answers from: ", report.computed_answers_file, "\n") - table_data = [] - nL = 80 - t_start = time.time() - score = {} - loader = SequentialTestLoader() - - for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) - if question is not None and n+1 != question: - continue - suite = loader.loadTestsFromTestCase(q) - qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ - q_title_print = "Question %i: 
%s"%(n+1, qtitle) - print(q_title_print, end="") - q.possible = 0 - q.obtained = 0 - q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] - UTextResult.q_title_print = q_title_print # Hacky - UTextResult.show_progress_bar = show_progress_bar # Hacky. - UTextResult.number = n - - res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - - possible = res.testsRun - obtained = len(res.successes) - - assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 - score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} - q.obtained = obtained - q.possible = possible - - s1 = f"*** Question q{n+1}" - s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) - print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) - - ws, possible, obtained = upack(score) - possible = int( msum(possible) ) - obtained = int( msum(obtained) ) # Cast to python int - report.possible = possible - report.obtained = obtained - now = datetime.now() - dt_string = now.strftime("%H:%M:%S") - - dt = int(time.time()-t_start) - minutes = dt//60 - seconds = dt - minutes*60 - plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") - - table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) - results = {'total': (obtained, possible), 'details': score} - return results, table_data - - - - -from tabulate import tabulate -from datetime import datetime -import inspect -import json -import os -import bz2 -import pickle -import os - -def bzwrite(json_str, token): # to get around obfuscation issues - with getattr(bz2, 'open')(token, "wt") as f: - f.write(json_str) - -def gather_imports(imp): - resources = {} - m = imp - # for m in pack_imports: - # print(f"*** {m.__name__}") - f = m.__file__ - # dn = os.path.dirname(f) - # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) - # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: - top_package = os.path.dirname(m.__file__) - module_import = True - else: - top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] - module_import = False - - # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) - # top_package = os.path.dirname(top_package) - import zipfile - # import strea - # zipfile.ZipFile - import io - # file_like_object = io.BytesIO(my_zip_data) - zip_buffer = io.BytesIO() - with zipfile.ZipFile(zip_buffer, 'w') as zip: - # zip.write() - for root, dirs, files in os.walk(top_package): - for file in files: - if file.endswith(".py"): - fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) - zip.write(fpath, v) - - resources['zipfile'] = zip_buffer.getvalue() - resources['top_package'] = top_package - resources['module_import'] = module_import - return resources, top_package - - if f.endswith("__init__.py"): - for root, dirs, files in os.walk(os.path.dirname(f)): - for file in files: - if file.endswith(".py"): - # print(file) - # print() - v = 
os.path.relpath(os.path.join(root, file), top_package) - with open(os.path.join(root, file), 'r') as ff: - resources[v] = ff.read() - else: - v = os.path.relpath(f, top_package) - with open(f, 'r') as ff: - resources[v] = ff.read() - return resources - -import argparse -parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: - -> python report1_grade.py - -Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. -For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: - -> python -m course_package.report1 - -see https://docs.python.org/3.9/using/cmdline.html -""", formatter_class=argparse.RawTextHelpFormatter) -parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') -parser.add_argument('--autolab', action="store_true", help='Show Autolab results') - -def gather_upload_to_campusnet(report, output_dir=None): - n = report.nL - args = parser.parse_args() - results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, - show_progress_bar=not args.noprogress, - big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) - # also load the source code of missing files... - - sources = {} - - if not args.autolab: - if len(report.individual_imports) > 0: - print("By uploading the .token file, you verify the files:") - for m in report.individual_imports: - print(">", m.__file__) - print("Are created/modified individually by you in agreement with DTUs exam rules") - report.pack_imports += report.individual_imports - - if len(report.pack_imports) > 0: - print("Including files in upload...") - for k, m in enumerate(report.pack_imports): - nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) - nimp['report_relative_location'] = report_relative_location - nimp['name'] = m.__name__ - sources[k] = nimp - # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") - # sources = {**sources, **nimp} - results['sources'] = sources - - if output_dir is None: - output_dir = os.getcwd() - - payload_out_base = report.__class__.__name__ + "_handin" - - obtain, possible = results['total'] - vstring = "_v"+report.version if report.version is not None else "" - - token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) - with open(token, 'wb') as f: - pickle.dump(results, f) - - if not args.autolab: - print(" ") - print("To get credit for your results, please upload the single file: ") - print(">", token) - print("To campusnet without any modifications.") - - # print("Now time for some autolab fun") - -def source_instantiate(name, report1_source, payload): - eval("exec")(report1_source, globals()) - pl = pickle.loads(bytes.fromhex(payload)) - report = eval(name)(payload=pl, strict=True) - # report.set_payload(pl) - return report - - - -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing 
cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = 
correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. 
Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = 
setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, 
descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' -report1_payload = '80049525010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04756803680486948c0474696d65948694473f506a000000000068038c0f746573745f6164645f68696464656e948694680686947d944b004b04736803680c8694680a86944700000000000000008c0474696d6594473f926de000000000758c0d4175746f6d6174696350617373947d94288c0d4175746f6d6174696350617373948c10746573745f68696464656e5f6661696c9486948c066173736572749486947d9468158c13746573745f73747564656e745f706173736564948694681886947d946815681b86948c0474696d659486944700000000000000006812473f9894100000000075752e' -name="Report3" - -report = source_instantiate(name, report1_source, report1_payload) -output_dir = os.path.dirname(__file__) -gather_upload_to_campusnet(report, output_dir) diff --git a/examples/example_docker/run_all_docker.py b/examples/example_docker/run_all_docker.py new file mode 100644 index 0000000000000000000000000000000000000000..15e9ea7b465ca803d16015252be24857434a15c9 --- /dev/null +++ b/examples/example_docker/run_all_docker.py @@ -0,0 +1,57 @@ +from unitgrade_private2.docker_helpers import docker_run_token_file +import os +import glob +import pickle +import time + +""" Run all examples on docker. """ + +if __name__ == "__main__": + # Step 0: Compile our two docker images. + from unitgrade_private2.docker_helpers import compile_docker_image + + docker_files = [f"{os.getcwd()}/../../docker_images/unitgrade-docker/Dockerfile"] + docker_tags = [] + for f in docker_files: + tag = compile_docker_image(f) + docker_tags.append( (f, tag) ) + + EX_BASE = f"{os.getcwd()}/../" # Base of examples. + + runs = [ + ("example_flat", "programs", "programs/report1flat_grade.py", "programs", "programs/report1flat_grade.py",), + ("example_simplest", "", "cs101/report1_grade.py", "", "cs101/report1_grade.py", ), + ("example_framework", "", "cs102/report2_grade.py", "", "cs102/report2_grade.py", ), + ("example_docker", "", "cs103/report3_complete_grade.py", "", "cs103/report3_grade.py",), + ] + rs = [] + + def p2mod(file, base): + return ".".join(os.path.normpath(os.path.relpath(file, base)).split(os.sep))[:-3] + start = time.time() + for ex, ibase, ig, sbase, sg in runs: + ibase = f"{EX_BASE}/{ex}/instructor/{ibase}" + ig = f"{EX_BASE}/{ex}/instructor/{ig}" + sbase = f"{EX_BASE}/{ex}/students/{sbase}" + sg = f"{EX_BASE}/{ex}/students/{sg}" + + # Uncomment to run example deployment scripts: + # os.system(f"cd {ibase} && python -m {p2mod(os.path.dirname(ig) + '/deploy.py', ibase)}") + + os.system(f"cd {sbase} && python -m { p2mod(sg, sbase) }") + stoken = glob.glob(f"{os.path.dirname(sg)}/*.token")[0] + + Dockerfile, tag = docker_tags[0] # Get first docker file. 
+ token = docker_run_token_file(Dockerfile_location=Dockerfile, + host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp", + student_token_file=stoken, + instructor_grade_script=ig) + with open(token, 'rb') as f: + iresults = pickle.load(f) + with open(stoken, 'rb') as f: + sresults = pickle.load(f) + rs.append( (ex, sresults, iresults) ) + + for ex, sresults, iresults in rs: + print( f"[{ex}]", "Student's score was:", sresults['total'], "score using my eval script", iresults['total']) + print("Total elapsed time", time.time()-start, "seconds") \ No newline at end of file diff --git a/examples/example_docker/students/cs103/.coverage b/examples/example_docker/students/cs103/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..f386b2198168450113494de5b3b3bd99d653d108 Binary files /dev/null and b/examples/example_docker/students/cs103/.coverage differ diff --git a/examples/example_docker/students/cs103/Report3_handin_10_of_30.token b/examples/example_docker/students/cs103/Report3_handin_10_of_30.token index 7231343c1882f4366cfe40348680c8d947889798..3880b47087c30a84d5471426ba613e6421ea54fe 100644 Binary files a/examples/example_docker/students/cs103/Report3_handin_10_of_30.token and b/examples/example_docker/students/cs103/Report3_handin_10_of_30.token differ diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc index 5c552a81fc42feaf0654da24d93270b73dc806af..d82e790ad8cee1871f1919af140451af558d6b85 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc and b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc index 6dfacc25f48700797f5286eae416edaeaf0e2b5b..0e7a0c627f56abdd78d2ba3ec05f9d13ea233030 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc and b/examples/example_docker/students/cs103/__pycache__/homework1.cpython-39.pyc differ diff --git a/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc index da19a02cc1c508d3eaafcf35b0b34e9d58501d7e..78157d3ccd77777a3e6bdc5d8ef57fa286b7b497 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc and b/examples/example_docker/students/cs103/__pycache__/report3.cpython-38.pyc differ diff --git a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc index 921af5c7208e7aa4d953fa1e4551029aeb05c182..41e62d06e5af5bb771d3c92d6afde5e8a0d36380 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc and b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-38.pyc differ diff --git a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc index 2f18f8be0976727ab41dd1114924b8483c6d3aac..3e87b71662ab4e431da5c9b952594f5411205f72 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc and b/examples/example_docker/students/cs103/__pycache__/report3_complete.cpython-39.pyc 
differ diff --git a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_complete_grade.cpython-39.pyc similarity index 90% rename from examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc rename to examples/example_docker/students/cs103/__pycache__/report3_complete_grade.cpython-39.pyc index 8bcaebe92988bf2a57647836e6aabc65f30fdadc..5efad03805d904ffdc7a11abb12cf5d179ea47c5 100644 Binary files a/examples/example_docker/instructor/unitgrade-docker/tmp/cs103/__pycache__/report3_complete_grade.cpython-38.pyc and b/examples/example_docker/students/cs103/__pycache__/report3_complete_grade.cpython-39.pyc differ diff --git a/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc index 7433fca776754f5b6910141e9adc509cbf13a6f5..fe08941631eb3353b6394ba8fca90bb65c4db357 100644 Binary files a/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc and b/examples/example_docker/students/cs103/__pycache__/report3_grade.cpython-38.pyc differ diff --git a/examples/example_docker/students/cs103/report3.py b/examples/example_docker/students/cs103/report3.py index c97b5a4117c254a17a5fed6787a485f4e69e0ebf..f83bb5384fa32a708a2be59d120813fd92bdc9ef 100644 --- a/examples/example_docker/students/cs103/report3.py +++ b/examples/example_docker/students/cs103/report3.py @@ -1,8 +1,8 @@ """ Example student code. This file is automatically generated from the files in the instructor-directory """ -from unitgrade2.unitgrade2 import UTestCase, Report, hide -from unitgrade2.unitgrade_helpers2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import UTestCase, Report +from src.unitgrade2 import evaluate_report_student class Week1(UTestCase): """ The first question for week 1. """ @@ -24,4 +24,6 @@ class Report3(Report): pack_imports = [cs103] if __name__ == "__main__": + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet(Report3()) evaluate_report_student(Report3()) diff --git a/examples/example_docker/students/cs103/report3_grade.py b/examples/example_docker/students/cs103/report3_grade.py index ecff9f7ce0634562a106d021cda68177c65c12fb..3c64c04d0172461f76215a8826a27e68a4d7071e 100644 --- a/examples/example_docker/students/cs103/report3_grade.py +++ b/examples/example_docker/students/cs103/report3_grade.py @@ -6,15 +6,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest -# from unitgrade2.unitgrade2 import MySuite - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. 
-import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -115,24 +110,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -142,11 +133,10 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) @@ -155,20 +145,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f" * q{n+1}) Total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -183,15 +169,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -214,7 +201,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -235,7 +223,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -279,14 +267,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -299,12 +287,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -317,15 +308,17 @@ def gather_upload_to_campusnet(report, output_dir=None): vstring = "_v"+report.version if report.version is not None else "" token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) + token = os.path.normpath(os.path.join(output_dir, token)) + + with open(token, 'wb') as f: pickle.dump(results, f) if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -338,8 +331,8 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = 
os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = 
self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from 
test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = 
os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, 
descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' -report1_payload = '80049525010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04756803680486948c0474696d65948694473f506a000000000068038c0f746573745f6164645f68696464656e948694680686947d944b004b04736803680c8694680a86944700000000000000008c0474696d6594473f926de000000000758c0d4175746f6d6174696350617373947d94288c0d4175746f6d6174696350617373948c10746573745f68696464656e5f6661696c9486948c066173736572749486947d9468158c13746573745f73747564656e745f706173736564948694681886947d946815681b86948c0474696d659486944700000000000000006812473f9894100000000075752e' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return 
root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n 
q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.normpath(os.path.join(output_dir, token))\n\n\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n from cs103.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n\nclass AutomaticPass(UTestCase):\n def test_student_passed(self):\n self.assertEqual(2,2)\n\n\nimport cs103\nclass Report3(Report):\n title = "CS 101 Report 3"\n questions = [(Week1, 20), (AutomaticPass, 10)] # Include a single question for 10 credits.\n pack_imports = [cs103]' +report1_payload = '80049568000000000000007d94288c055765656b31947d942868018c08746573745f6164649486948c066173736572749486947d94284b014aa1ffffff4b004b04758c0474696d6594473fb71ac800000000758c0d4175746f6d6174696350617373947d946808473fb127100000000073752e' name="Report3" report = source_instantiate(name, report1_source, report1_payload) diff --git a/examples/example_docker/students/cs103/unitgrade/AutomaticPass.pkl b/examples/example_docker/students/cs103/unitgrade/AutomaticPass.pkl index 2a722e2b9c8264b76eca73fec2c7dd84eb0e02d3..9b6ff7ac689837f86e1b0e393993ec7acbb784e8 100644 Binary files a/examples/example_docker/students/cs103/unitgrade/AutomaticPass.pkl and b/examples/example_docker/students/cs103/unitgrade/AutomaticPass.pkl differ diff --git a/examples/example_docker/students/cs103/unitgrade/Week1.pkl b/examples/example_docker/students/cs103/unitgrade/Week1.pkl index fe27b785553c86fe6975853b9990eed439d2d5bc..20eb565b4b7903e4aef2d3d44e08726a2b0e14ed 100644 Binary files a/examples/example_docker/students/cs103/unitgrade/Week1.pkl and b/examples/example_docker/students/cs103/unitgrade/Week1.pkl differ diff --git a/examples/example_flat/instructor/cs101flat/Report1Flat_handin_10_of_10.token b/examples/example_flat/instructor/cs101flat/Report1Flat_handin_10_of_10.token new file mode 100644 index 0000000000000000000000000000000000000000..6207232bba6714f3ffac3686c93f17dac4be1cb1 Binary files /dev/null and b/examples/example_flat/instructor/cs101flat/Report1Flat_handin_10_of_10.token differ diff --git a/examples/example_flat/instructor/cs101flat/__pycache__/deploy.cpython-38.pyc b/examples/example_flat/instructor/cs101flat/__pycache__/deploy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c91ca3192aaec26f60abe00c1109cb32983e52a Binary files /dev/null and b/examples/example_flat/instructor/cs101flat/__pycache__/deploy.cpython-38.pyc differ diff --git a/examples/example_flat/instructor/cs101flat/__pycache__/homework1.cpython-38.pyc b/examples/example_flat/instructor/cs101flat/__pycache__/homework1.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ad952b7db7aa4adbd8a113e6d74f318bd77ce47 Binary files /dev/null and b/examples/example_flat/instructor/cs101flat/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_flat/instructor/cs101flat/__pycache__/report1flat.cpython-38.pyc b/examples/example_flat/instructor/cs101flat/__pycache__/report1flat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b843499aeb6a5314ec477e4a8ff9439e6927c59 Binary files /dev/null and b/examples/example_flat/instructor/cs101flat/__pycache__/report1flat.cpython-38.pyc differ diff --git a/examples/example_flat/instructor/cs101flat/deploy.py b/examples/example_flat/instructor/cs101flat/deploy.py new file mode 100644 index 0000000000000000000000000000000000000000..be04b5a29b3f8e855fc9cf3671b2be5da7da60ff --- /dev/null +++ b/examples/example_flat/instructor/cs101flat/deploy.py @@ -0,0 +1,15 @@ +from report1flat import Report1Flat +from unitgrade_private2.hidden_create_files import setup_grade_file_report +from 
snipper import snip_dir + +if __name__ == "__main__": + setup_grade_file_report(Report1Flat, minify=False, obfuscate=False, execute=False) + + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet((Report1Flat())) + + # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper + snip_dir.snip_dir(source_dir="", dest_dir="../../students/cs101flat", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) + + + diff --git a/examples/example_flat/instructor/cs101flat/homework1.py b/examples/example_flat/instructor/cs101flat/homework1.py new file mode 100644 index 0000000000000000000000000000000000000000..286b79fbac40c2d02b5874c0a73fc387835ce2b3 --- /dev/null +++ b/examples/example_flat/instructor/cs101flat/homework1.py @@ -0,0 +1,16 @@ +def reverse_list(mylist): #!f + """ + Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g. + reverse_list([1,2,3]) should return [3,2,1] (as a list). + """ + return list(reversed(mylist)) + +def add(a,b): #!f + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ + return a+b + +if __name__ == "__main__": + # Problem 1: Write a function which add two numbers + print(f"Your result of 2 + 2 = {add(2,2)}") + print(f"Reversing a small list", reverse_list([2,3,5,7])) diff --git a/examples/example_flat/instructor/cs101flat/report1flat.py b/examples/example_flat/instructor/cs101flat/report1flat.py new file mode 100644 index 0000000000000000000000000000000000000000..9ede035e9249677e98fa03104e29f21cacee8a01 --- /dev/null +++ b/examples/example_flat/instructor/cs101flat/report1flat.py @@ -0,0 +1,24 @@ +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student +from homework1 import reverse_list, add +import unittest + +class Week1(unittest.TestCase): + def test_add(self): + self.assertEqual(add(2,2), 4) + self.assertEqual(add(-100, 5), -95) + + def test_reverse(self): + self.assertEqual(reverse_list([1,2,3]), [3,2,1]) + + +import homework1 +class Report1Flat(Report): + title = "CS 101 Report 1" + questions = [(Week1, 10)] # Include a single question for 10 credits. + pack_imports = [homework1] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Flat()) diff --git a/examples/example_flat/instructor/cs101flat/report1flat_grade.py b/examples/example_flat/instructor/cs101flat/report1flat_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..48b09802a73374665ac290657d843323f6f47b5e --- /dev/null +++ b/examples/example_flat/instructor/cs101flat/report1flat_grade.py @@ -0,0 +1,349 @@ + +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +# from unitgrade2.unitgrade2 import MySuite + +import inspect +import os +import argparse +import sys +import time +import threading # don't import Thread bc. of minify issue. +import tqdm # don't do from tqdm import tqdm because of minify-issue + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. 
To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + print(b + " v" + __version__) + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + nL = 80 + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + # q = q() + # q_hidden = False + # q_hidden = issubclass(q.__class__, Hidden) + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + # unittest.Te + # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + # possible = int(ws @ possible) + # obtained = int(ws @ obtained) + # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f"*** Question q{n+1}" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return 
resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + print(" ") + print("="*n) + print("Final evaluation") + print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f"*** {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single file: ") + print(">", token) + print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, 
compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport os\nfrom io import StringIO\nfrom unittest.runner import _WritelnDecorator\nimport inspect\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n 
super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. 
Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n 
working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, 
verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="")\n else:\n print( dot_parts, end="")\n\n if tsecs >= 0.1:\n state += " (" + str(tsecs) + " seconds)"\n print(state)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n# def wrapper(foo):\n# def magic(self):\n# # s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n# foo(self)\n# magic.__doc__ = foo.__doc__\n# return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\n\nimport homework1\nclass Report1Flat(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_payload = 
'8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' +name="Report1Flat" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/examples/example_flat/students/cs101flat/Report1Flat_handin_0_of_10.token b/examples/example_flat/students/cs101flat/Report1Flat_handin_0_of_10.token new file mode 100644 index 0000000000000000000000000000000000000000..c9122d00c8fff99b66be2f551819687826d1b9c0 Binary files /dev/null and b/examples/example_flat/students/cs101flat/Report1Flat_handin_0_of_10.token differ diff --git a/examples/example_flat/students/cs101flat/__pycache__/deploy.cpython-38.pyc b/examples/example_flat/students/cs101flat/__pycache__/deploy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c91ca3192aaec26f60abe00c1109cb32983e52a Binary files /dev/null and b/examples/example_flat/students/cs101flat/__pycache__/deploy.cpython-38.pyc differ diff --git a/examples/example_flat/students/cs101flat/__pycache__/homework1.cpython-38.pyc b/examples/example_flat/students/cs101flat/__pycache__/homework1.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f019923d47a404593f992813d90c89b3e2da2546 Binary files /dev/null and b/examples/example_flat/students/cs101flat/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_flat/students/cs101flat/__pycache__/report1flat.cpython-38.pyc b/examples/example_flat/students/cs101flat/__pycache__/report1flat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b843499aeb6a5314ec477e4a8ff9439e6927c59 Binary files /dev/null and b/examples/example_flat/students/cs101flat/__pycache__/report1flat.cpython-38.pyc differ diff --git a/examples/example_flat/students/cs101flat/__pycache__/report1flat_grade.cpython-38.pyc b/examples/example_flat/students/cs101flat/__pycache__/report1flat_grade.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01435181f7f83079961ed2105a4d343c2511a9d0 Binary files /dev/null and b/examples/example_flat/students/cs101flat/__pycache__/report1flat_grade.cpython-38.pyc differ diff --git a/examples/example_flat/students/cs101flat/homework1.py b/examples/example_flat/students/cs101flat/homework1.py new file mode 100644 index 0000000000000000000000000000000000000000..3543f1ba46b63eec3a2c2e007ee998660c7136c6 --- /dev/null +++ b/examples/example_flat/students/cs101flat/homework1.py @@ -0,0 +1,21 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +def reverse_list(mylist): + """ + Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g. + reverse_list([1,2,3]) should return [3,2,1] (as a list). + """ + # TODO: 1 lines missing. + raise NotImplementedError("Implement function body") + +def add(a,b): + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ + # TODO: 1 lines missing. 
+ raise NotImplementedError("Implement function body") + +if __name__ == "__main__": + # Problem 1: Write a function which add two numbers + print(f"Your result of 2 + 2 = {add(2,2)}") + print(f"Reversing a small list", reverse_list([2,3,5,7])) diff --git a/examples/example_flat/students/cs101flat/report1flat.py b/examples/example_flat/students/cs101flat/report1flat.py new file mode 100644 index 0000000000000000000000000000000000000000..4a268f7ac5347c1f5b4dd0e6756560b94c5e7669 --- /dev/null +++ b/examples/example_flat/students/cs101flat/report1flat.py @@ -0,0 +1,27 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student +from homework1 import reverse_list, add +import unittest + +class Week1(unittest.TestCase): + def test_add(self): + self.assertEqual(add(2,2), 4) + self.assertEqual(add(-100, 5), -95) + + def test_reverse(self): + self.assertEqual(reverse_list([1,2,3]), [3,2,1]) + + +import homework1 +class Report1Flat(Report): + title = "CS 101 Report 1" + questions = [(Week1, 10)] # Include a single question for 10 credits. + pack_imports = [homework1] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Flat()) diff --git a/examples/example_flat/students/cs101flat/report1flat_grade.py b/examples/example_flat/students/cs101flat/report1flat_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..65db51beffc818d28805cd373d16a74c2aa2c8ef --- /dev/null +++ b/examples/example_flat/students/cs101flat/report1flat_grade.py @@ -0,0 +1,351 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +# from unitgrade2.unitgrade2 import MySuite + +import inspect +import os +import argparse +import sys +import time +import threading # don't import Thread bc. of minify issue. +import tqdm # don't do from tqdm import tqdm because of minify-issue + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) 
commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. \nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + print(b + " v" + __version__) + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + nL = 80 + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + # q = q() + # q_hidden = False + # q_hidden = issubclass(q.__class__, Hidden) + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = 
q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + # unittest.Te + # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. + UTextResult.number = n + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + # possible = int(ws @ possible) + # obtained = int(ws @ obtained) + # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f"*** Question q{n+1}" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = 
module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + print(" ") + print("="*n) + print("Final evaluation") + print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f"*** {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single file: ") + print(">", token) + print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, 
strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . import cache_read\nimport unittest\nimport numpy as np\nimport sys\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport os\nfrom io import StringIO\nfrom unittest.runner import _WritelnDecorator\nimport inspect\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n 
_computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n 
show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], 
\'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n# class MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n# raise Exception("no suite")\n# pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, 
verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="")\n else:\n print( dot_parts, end="")\n\n if tsecs >= 0.1:\n state += " (" + str(tsecs) + " seconds)"\n print(state)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n # item_title = item_title.split("\\n")[0]\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n# def wrapper(foo):\n# def magic(self):\n# # s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n# foo(self)\n# magic.__doc__ = foo.__doc__\n# return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n def capture(self):\n return Capturing2(stdout=self._stdout)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n self._stdout = sys.stdout\n import io\n sys.stdout = io.StringIO()\n super().setUp()\n # print("Setting up...")\n\n def _callTearDown(self):\n sys.stdout = self._stdout\n super().tearDown()\n # print("asdfsfd")\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n# from unitgrade2.unitgrade2 import MySuite\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
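The `-q` option declared above accepts either a question number or a dotted `question.item` pair; a small sketch of how `evaluate_report_student` interprets it (the value is just an example):

```python
# "-q 2.1" selects question 2, item 1; "-q 2" selects the whole question.
q = "2.1"
if "." in q:
    question, qitem = [int(v) for v in q.split(".")]   # -> 2, 1
else:
    question, qitem = int(q), None
```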
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = 
int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
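Per-question credit in `evaluate_report` above is truncated rather than rounded; a worked example with made-up numbers:

```python
# A question worth w credits gives int(w * obtained / possible) credits when
# `obtained` of `possible` tests pass (0 credits if no tests ran).
w, possible, obtained = 10, 4, 3
points = int(w * obtained * 1.0 / possible) if possible > 0 else 0   # -> 7, not 8
```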
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\n\nimport homework1\nclass Report1Flat(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_payload = 
'8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' +name="Report1Flat" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) diff --git a/examples/example_framework/instructor/cs102/.coverage b/examples/example_framework/instructor/cs102/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..a93b4d7e94a84f8ad080beeafaa98b3784e23e91 Binary files /dev/null and b/examples/example_framework/instructor/cs102/.coverage differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_10_of_18.token b/examples/example_framework/instructor/cs102/Report2_handin_10_of_18.token deleted file mode 100644 index 3dc602acc93517ba2a3211a3f926b2dfe0cf8c90..0000000000000000000000000000000000000000 Binary files a/examples/example_framework/instructor/cs102/Report2_handin_10_of_18.token and /dev/null differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_13_of_18.token b/examples/example_framework/instructor/cs102/Report2_handin_13_of_18.token new file mode 100644 index 0000000000000000000000000000000000000000..303223eab586229ad1fd65b48a7b96322ded0274 Binary files /dev/null and b/examples/example_framework/instructor/cs102/Report2_handin_13_of_18.token differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_18_of_18.token b/examples/example_framework/instructor/cs102/Report2_handin_18_of_18.token index 01a473c0bc4e8c0b536bb6c2b01d901ca33c5689..fe0c87cc2b34d3770b07ece28291836a66a6d7ed 100644 Binary files a/examples/example_framework/instructor/cs102/Report2_handin_18_of_18.token and b/examples/example_framework/instructor/cs102/Report2_handin_18_of_18.token differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_28_of_28.token b/examples/example_framework/instructor/cs102/Report2_handin_28_of_28.token deleted file mode 100644 index 4fe9c89fea3b77998c9201c910a729cc1eb16d89..0000000000000000000000000000000000000000 Binary files a/examples/example_framework/instructor/cs102/Report2_handin_28_of_28.token and /dev/null differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_5_of_18.token b/examples/example_framework/instructor/cs102/Report2_handin_5_of_18.token deleted file mode 100644 index e22d430bac7ac0c21c0931fd87cd6871fa95b8c9..0000000000000000000000000000000000000000 Binary files a/examples/example_framework/instructor/cs102/Report2_handin_5_of_18.token and /dev/null differ diff --git a/examples/example_framework/instructor/cs102/Report2_handin_5_of_28.token b/examples/example_framework/instructor/cs102/Report2_handin_5_of_28.token deleted file mode 100644 index cb4ed5d6abfec70f738f205440eba8f73c909f7d..0000000000000000000000000000000000000000 Binary files a/examples/example_framework/instructor/cs102/Report2_handin_5_of_28.token and /dev/null differ diff --git a/examples/example_framework/instructor/cs102/__pycache__/deploy.cpython-38.pyc b/examples/example_framework/instructor/cs102/__pycache__/deploy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d9b529d6cae3250756694b95ca75ce15b33bf86 Binary files /dev/null and b/examples/example_framework/instructor/cs102/__pycache__/deploy.cpython-38.pyc differ diff --git a/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-38.pyc 
b/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-38.pyc index aca3c8b22c11ae4d9d824d7ba252b4194f5db12f..d67337369ba5bf909f1eb07c3dda178779750fb2 100644 Binary files a/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-38.pyc and b/examples/example_framework/instructor/cs102/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_framework/instructor/cs102/__pycache__/report2_grade.cpython-38.pyc b/examples/example_framework/instructor/cs102/__pycache__/report2_grade.cpython-38.pyc index 5fb59bc9a735c474aff24e25cd7c318cc0869f8d..42fb3a4a526346eae8494f38ac29d254b5f30b83 100644 Binary files a/examples/example_framework/instructor/cs102/__pycache__/report2_grade.cpython-38.pyc and b/examples/example_framework/instructor/cs102/__pycache__/report2_grade.cpython-38.pyc differ diff --git a/examples/example_framework/instructor/cs102/deploy.py b/examples/example_framework/instructor/cs102/deploy.py index c5859086617e82fa7a118f320895376a784aeb30..4e47e5eacaad26809890394711b3dc80438371ae 100644 --- a/examples/example_framework/instructor/cs102/deploy.py +++ b/examples/example_framework/instructor/cs102/deploy.py @@ -2,8 +2,10 @@ from cs102.report2 import Report2 from unitgrade_private2.hidden_create_files import setup_grade_file_report from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet from snipper.snip_dir import snip_dir -if __name__ == "__main__": +import os +wd = os.path.dirname(__file__) +if __name__ == "__main__": gather_upload_to_campusnet(Report2()) setup_grade_file_report(Report2, minify=False, obfuscate=False, execute=False) - snip_dir(source_dir="../cs102", dest_dir="../../students/cs102", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) + snip_dir(source_dir=wd+"/../cs102", dest_dir=wd+"/../../students/cs102", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) diff --git a/examples/example_framework/instructor/cs102/report2.py b/examples/example_framework/instructor/cs102/report2.py index 381cdb16108cfb10f1705d57df8aab14bfddc97a..c7be1cdd9eef000df1fa31e08d714142454abd23 100644 --- a/examples/example_framework/instructor/cs102/report2.py +++ b/examples/example_framework/instructor/cs102/report2.py @@ -1,6 +1,7 @@ -from unitgrade2.unitgrade2 import Report -from unitgrade2.unitgrade_helpers2 import evaluate_report_student -from unitgrade2.unitgrade2 import UTestCase, cache, hide +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import UTestCase, cache + class Week1(UTestCase): """ The first question for week 1. """ @@ -8,9 +9,6 @@ class Week1(UTestCase): """ Docstring for this method """ from cs102.homework1 import add self.assertEqualC(add(2,2)) - with self.capture() as out: - print("hello world 42") - self.assertEqual(out.numbers[0], 42) self.assertEqualC(add(-100, 5)) def test_reverse(self): @@ -18,6 +16,12 @@ class Week1(UTestCase): from cs102.homework1 import reverse_list self.assertEqualC(reverse_list([1,2,3])) + def test_output_capture(self): + with self.capture() as out: + print("hello world 42") # Genereate some output (i.e. in a homework script) + self.assertEqual(out.numbers[0], 42) # out.numbers is a list of all numbers generated + self.assertEqual(out.output, "hello world 42") # you can also access the raw output. 
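The new `test_output_capture` relies on `out.numbers`, the list of numbers found in the captured output. The sketch below shows the kind of extraction involved, following the numeric regex used by `extract_numbers` in the embedded grading source; whether `capture()` reuses exactly that helper is not visible in this diff.

```python
# Rough sketch: pull numbers out of captured text with a verbose numeric regex,
# converting each match to int or float as appropriate.
import re

numeric = re.compile(r'[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ )?', re.VERBOSE)
txt = "hello world 42"
numbers = [float(a) if ('.' in a or 'e' in a) else int(a) for a in numeric.findall(txt)]
assert numbers == [42]
```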
+ class Question2(UTestCase): """ Second problem """ diff --git a/examples/example_framework/instructor/cs102/report2_grade.py b/examples/example_framework/instructor/cs102/report2_grade.py index 503237e5942ba9c82dca8454230add50e726429f..eeb50eccc733f65499b4f35cdd6423f99bc885db 100644 --- a/examples/example_framework/instructor/cs102/report2_grade.py +++ b/examples/example_framework/instructor/cs102/report2_grade.py @@ -4,14 +4,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -61,53 +57,6 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass show_tol_err=show_tol_err) - # try: # For registering stats. - # import unitgrade_private - # import irlc.lectures - # import xlwings - # from openpyxl import Workbook - # import pandas as pd - # from collections import defaultdict - # dd = defaultdict(lambda: []) - # error_computed = [] - # for k1, (q, _) in enumerate(report.questions): - # for k2, item in enumerate(q.items): - # dd['question_index'].append(k1) - # dd['item_index'].append(k2) - # dd['question'].append(q.name) - # dd['item'].append(item.name) - # dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol) - # error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed) - # - # qstats = report.wdir + "/" + report.name + ".xlsx" - # - # if os.path.isfile(qstats): - # d_read = pd.read_excel(qstats).to_dict() - # else: - # d_read = dict() - # - # for k in range(1000): - # key = 'run_'+str(k) - # if key in d_read: - # dd[key] = list(d_read['run_0'].values()) - # else: - # dd[key] = error_computed - # break - # - # workbook = Workbook() - # worksheet = workbook.active - # for col, key in enumerate(dd.keys()): - # worksheet.cell(row=1, column=col+1).value = key - # for row, item in enumerate(dd[key]): - # worksheet.cell(row=row+2, column=col+1).value = item - # - # workbook.save(qstats) - # workbook.close() - # - # except ModuleNotFoundError as e: - # s = 234 - # pass - if question is None: print("Provisional evaluation") tabulate(table_data) @@ -159,24 +108,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -186,104 +131,28 @@ def evaluate_report(report, question=None, qitem=None, passall=False, 
verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite) - z = 234 - # for j, item in enumerate(q.items): - # if qitem is not None and question is not None and j+1 != qitem: - # continue - # - # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles. - # # if not item.question.has_called_init_: - # start = time.time() - # - # cc = None - # if show_progress_bar: - # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] ) - # cc = ActiveProgress(t=total_estimated_time, title=q_title_print) - # from unitgrade import Capturing # DON'T REMOVE THIS LINE - # with eval('Capturing')(unmute=unmute): # Clunky import syntax is required bc. of minify issue. - # try: - # for q2 in q_with_outstanding_init: - # q2.init() - # q2.has_called_init_ = True - # - # # item.question.init() # Initialize the question. Useful for sharing resources. - # except Exception as e: - # if not passall: - # if not silent: - # print(" ") - # print("="*30) - # print(f"When initializing question {q.title} the initialization code threw an error") - # print(e) - # print("The remaining parts of this question will likely fail.") - # print("="*30) - # - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(q_title_print, end="") - # - # q_time =np.round( time.time()-start, 2) - # - # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "") - # print("=" * nL) - # q_with_outstanding_init = None - # - # # item.question = q # Set the parent question instance for later reference. - # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title) - # - # if show_progress_bar: - # cc = ActiveProgress(t=item.estimated_time, title=item_title_print) - # else: - # print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="") - # hidden = issubclass(item.__class__, Hidden) - # # if not hidden: - # # print(ss, end="") - # # sys.stdout.flush() - # start = time.time() - # - # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent) - # q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title} - # tsecs = np.round(time.time()-start, 2) - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(item_title_print + ('.' 
* max(0, nL - 4 - len(ss))), end="") - # - # if not hidden: - # ss = "PASS" if current == possible else "*** FAILED" - # if tsecs >= 0.1: - # ss += " ("+ str(tsecs) + " seconds)" - # print(ss) - - # ws, possible, obtained = upack(q_) possible = res.testsRun obtained = len(res.successes) assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f"Question {n+1} total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -298,15 +167,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -329,7 +199,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -350,7 +221,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -394,14 +265,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... 
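The `gather_imports` change above distinguishes single-file modules from packages before zipping their sources; a simplified sketch of that distinction (the real code resolves the top-level package via `__import__`, so treat this as illustrative):

```python
# Single-file modules have __file__ but no __path__; packages have both.
import os

def top_package_of(m):
    if hasattr(m, '__file__') and not hasattr(m, '__path__'):
        return os.path.dirname(m.__file__), True   # module_import = True
    return list(m.__path__)[0], False              # regular package import
```

For a package, zip entries stay relative to the package's parent directory; for a plain module import they are made relative to `top_package` itself, which is exactly what the changed `os.path.relpath(...)` line selects between.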
sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -414,12 +285,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -438,9 +312,9 @@ def gather_upload_to_campusnet(report, output_dir=None): if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -453,8 +327,8 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > 
tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n 
super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
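The `cache` decorator defined just above memoizes a method's return value under the test's cache id, so a value computed once can be replayed from the pickled class cache on later runs. A hypothetical usage sketch (class and method names are made up; import path as used elsewhere in this diff):

```python
from unitgrade2.unitgrade2 import UTestCase, cache

class Week1Cached(UTestCase):
    @cache
    def reference_sum(self, n):
        # Potentially expensive reference computation; stored in the class
        # cache under (cache_id, ("@cache", "reference_sum", args)).
        return sum(range(n))

    def test_sum(self):
        self.assertEqualC(self.reference_sum(10))
```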
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n\n def test_add(self):\n """ Docstring for this method """\n from cs102.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n def test_reverse(self):\n """ Reverse a list """ # Add a title to the test.\n from cs102.homework1 import reverse_list\n self.assertEqualC(reverse_list([1,2,3]))\n\n\nclass Question2(UTestCase):\n """ Second problem """\n @cache\n def my_reversal(self, ls):\n # The \'@cache\' decorator ensures the function is not run on the *students* computer\n # Instead the code is run on the teachers computer and the result is passed on with the\n # other pre-computed results -- i.e. this function will run regardless of how the student happens to have\n # implemented reverse_list.\n from cs102.homework1 import reverse_list\n return reverse_list(ls)\n\n def test_reverse_tricky(self):\n ls = [2,4,8]\n self.title = f"Reversing a small list containing {ls=}"\n ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result.\n ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments.\n self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code.\n\n\nimport cs102\nclass Report2(Report):\n title = "CS 101 Report 2"\n questions = [(Week1, 10), (Question2, 8) ] # Include a single question for 10 credits.\n pack_imports = [cs102]' -report1_payload = '8004959a010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c057469746c659486948c19446f63737472696e6720666f722074686973206d6574686f64946803680486948c066173736572749486947d94284b004b044b014aa1ffffff756803680486948c0474696d6594869447000000000000000068038c0c746573745f72657665727365948694680686948c0e526576657273652061206c69737494680368108694680a86947d944b005d94284b034b024b016573680368108694680e86944700000000000000008c0474696d6594470000000000000000758c095175657374696f6e32947d94288c095175657374696f6e32948c13746573745f726576657273655f747269636b799486948c066173736572749486947d944b005d94284b024b044b086573681d681e86948c057469746c659486948c2e526576657273696e67206120736d616c6c206c69737420636f6e7461696e696e67206c733d5b322c20342c20385d94681d681e86948c0474696d65948694470000000000000000681a473f5066000000000075752e' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return 
root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n 
q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n 
self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n """ Docstring for this method """\n from cs102.homework1 import add\n self.assertEqualC(add(2,2))\n with self.capture() as out:\n print("hello world 42")\n self.assertEqual(out.numbers[0], 42)\n self.assertEqualC(add(-100, 5))\n\n def test_reverse(self):\n """ Reverse a list """ # Add a title to the test.\n from cs102.homework1 import reverse_list\n self.assertEqualC(reverse_list([1,2,3]))\n\n\nclass Question2(UTestCase):\n """ Second problem """\n @cache\n def my_reversal(self, ls):\n # The \'@cache\' decorator ensures the function is not run on the *students* computer\n # Instead the code is run on the teachers computer and the result is passed on with the\n # other pre-computed results -- i.e. this function will run regardless of how the student happens to have\n # implemented reverse_list.\n from cs102.homework1 import reverse_list\n return reverse_list(ls)\n\n def test_reverse_tricky(self):\n ls = [2,4,8]\n self.title = f"Reversing a small list containing {ls=}" # Titles can be set like this at any point in the function body.\n ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result.\n ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments.\n self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code.\n\n\nimport cs102\nclass Report2(Report):\n title = "CS 101 Report 2"\n questions = [(Week1, 10), (Question2, 8) ]\n pack_imports = [cs102]' +report1_payload = '80049510010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b004b044b014aa1ffffff7568038c0c746573745f72657665727365948694680686947d944b005d94284b034b024b0165738c0474696d6594473fe6a7e700000000758c095175657374696f6e32947d94288c095175657374696f6e32948c13746573745f726576657273655f747269636b799486948c057469746c659486948c2e526576657273696e67206120736d616c6c206c69737420636f6e7461696e696e67206c733d5b322c20342c20385d946811681286948c066173736572749486947d944b005d94284b024b044b086573680e473facac280000000075752e' name="Report2" report = source_instantiate(name, report1_source, report1_payload) diff --git a/examples/example_framework/instructor/cs102/unitgrade/Question2.pkl b/examples/example_framework/instructor/cs102/unitgrade/Question2.pkl index 634a7fbbe4bad27f24b2d894ef3c0b37c4f5dd94..b950c49faa2b91b7675ee266d63a13fe3652e3cc 100644 Binary files a/examples/example_framework/instructor/cs102/unitgrade/Question2.pkl and b/examples/example_framework/instructor/cs102/unitgrade/Question2.pkl differ diff --git a/examples/example_framework/instructor/cs102/unitgrade/Week1.pkl b/examples/example_framework/instructor/cs102/unitgrade/Week1.pkl index 7912698f036128bb2a8b616c2a66c58ac9e774e5..6b4ee502e9c67e2ff3aba1b11c4911bb88195e34 100644 Binary files a/examples/example_framework/instructor/cs102/unitgrade/Week1.pkl and b/examples/example_framework/instructor/cs102/unitgrade/Week1.pkl differ diff --git a/examples/example_framework/students/cs102/.coverage b/examples/example_framework/students/cs102/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..a93b4d7e94a84f8ad080beeafaa98b3784e23e91 Binary files /dev/null and b/examples/example_framework/students/cs102/.coverage differ diff --git a/examples/example_framework/students/cs102/Report2_handin_0_of_18.token b/examples/example_framework/students/cs102/Report2_handin_0_of_18.token deleted file mode 100644 index 63734376c0eae1c4df3121a0c656e90452e80cba..0000000000000000000000000000000000000000 
Binary files a/examples/example_framework/students/cs102/Report2_handin_0_of_18.token and /dev/null differ diff --git a/examples/example_framework/students/cs102/__pycache__/deploy.cpython-38.pyc b/examples/example_framework/students/cs102/__pycache__/deploy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d9b529d6cae3250756694b95ca75ce15b33bf86 Binary files /dev/null and b/examples/example_framework/students/cs102/__pycache__/deploy.cpython-38.pyc differ diff --git a/examples/example_framework/students/cs102/__pycache__/homework1.cpython-38.pyc b/examples/example_framework/students/cs102/__pycache__/homework1.cpython-38.pyc index a099ef9f65bf987d85152d20d4dad941ae1d27bc..d67337369ba5bf909f1eb07c3dda178779750fb2 100644 Binary files a/examples/example_framework/students/cs102/__pycache__/homework1.cpython-38.pyc and b/examples/example_framework/students/cs102/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_framework/students/cs102/__pycache__/report2.cpython-38.pyc b/examples/example_framework/students/cs102/__pycache__/report2.cpython-38.pyc index d06f59685aba1b0e7100778b3122129ce8573306..e88ea2f06cb46d8605dea67d9293e601ee149b83 100644 Binary files a/examples/example_framework/students/cs102/__pycache__/report2.cpython-38.pyc and b/examples/example_framework/students/cs102/__pycache__/report2.cpython-38.pyc differ diff --git a/examples/example_framework/students/cs102/__pycache__/report2_grade.cpython-38.pyc b/examples/example_framework/students/cs102/__pycache__/report2_grade.cpython-38.pyc index cb391caf0c1bf2175df62b4c8789fee08d1961e3..42fb3a4a526346eae8494f38ac29d254b5f30b83 100644 Binary files a/examples/example_framework/students/cs102/__pycache__/report2_grade.cpython-38.pyc and b/examples/example_framework/students/cs102/__pycache__/report2_grade.cpython-38.pyc differ diff --git a/examples/example_framework/students/cs102/report2.py b/examples/example_framework/students/cs102/report2.py index d84e9c436feeb3f9a7c62cf6c637df8e8561b522..e4d6b02b7909b3e72441c48538ae9731e63846be 100644 --- a/examples/example_framework/students/cs102/report2.py +++ b/examples/example_framework/students/cs102/report2.py @@ -1,18 +1,20 @@ """ Example student code. This file is automatically generated from the files in the instructor-directory """ -from unitgrade2.unitgrade2 import Report -from unitgrade2.unitgrade_helpers2 import evaluate_report_student -from unitgrade2.unitgrade2 import UTestCase, cache, hide -import random +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import UTestCase, cache + class Week1(UTestCase): """ The first question for week 1. """ - def test_add(self): """ Docstring for this method """ from cs102.homework1 import add self.assertEqualC(add(2,2)) + with self.capture() as out: + print("hello world 42") + self.assertEqual(out.numbers[0], 42) self.assertEqualC(add(-100, 5)) def test_reverse(self): @@ -34,7 +36,7 @@ class Question2(UTestCase): def test_reverse_tricky(self): ls = [2,4,8] - self.title = f"Reversing a small list containing {ls=}" + self.title = f"Reversing a small list containing {ls=}" # Titles can be set like this at any point in the function body. ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result. ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments. self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code. 
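The `my_reversal` helper in the hunk above relies on the framework's `@cache` decorator, whose definition appears further down in the embedded grade-script source: results are memoized by call arguments, recorded when the instructor runs the tests, and then replayed on the student's machine so the wrapped reference implementation never has to execute there. A minimal sketch of that idea, with illustrative names (`_precomputed` is a stand-in for the per-class cache that unitgrade pickles into the question's `.pkl`/payload; this is not the framework's actual code):

```python
_precomputed = {}   # stand-in for the cache unitgrade ships inside the .pkl / payload

def cache(func):
    # Memoize by method name and (hashable) call arguments.
    def wrapper(self, *args, **kwargs):
        key = (func.__name__, args, tuple(sorted(kwargs.items())))
        if key not in _precomputed:               # instructor run: compute and record the value
            _precomputed[key] = func(self, *args, **kwargs)
        return _precomputed[key]                  # student run: replay the recorded value
    return wrapper

class Demo:
    @cache
    def my_reversal(self, ls):                    # stand-in for the instructor's reference code
        return list(reversed(ls))

d = Demo()
assert d.my_reversal((1, 2, 3)) == [3, 2, 1]      # first call computes and records
assert d.my_reversal((1, 2, 3)) == [3, 2, 1]      # later calls return the stored result
```

In the real framework the key additionally includes the test's `cache_id()` and is built with `functools._make_key`, and the store is the class-level `_cache2` dictionary that `_save_cache()` pickles to disk, as can be seen in the embedded `UTestCase` source later in this diff.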
@@ -43,7 +45,7 @@ class Question2(UTestCase): import cs102 class Report2(Report): title = "CS 101 Report 2" - questions = [(Week1, 10), (Question2, 8) ] # Include a single question for 10 credits. + questions = [(Week1, 10), (Question2, 8) ] pack_imports = [cs102] if __name__ == "__main__": diff --git a/examples/example_framework/students/cs102/report2_grade.py b/examples/example_framework/students/cs102/report2_grade.py index 1f9a88549ff25664292eea15d23b7d7dd77c2b86..e6318de60c2c60ab0d9929d10db2a2c451f9398f 100644 --- a/examples/example_framework/students/cs102/report2_grade.py +++ b/examples/example_framework/students/cs102/report2_grade.py @@ -6,14 +6,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -63,53 +59,6 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass show_tol_err=show_tol_err) - # try: # For registering stats. - # import unitgrade_private - # import irlc.lectures - # import xlwings - # from openpyxl import Workbook - # import pandas as pd - # from collections import defaultdict - # dd = defaultdict(lambda: []) - # error_computed = [] - # for k1, (q, _) in enumerate(report.questions): - # for k2, item in enumerate(q.items): - # dd['question_index'].append(k1) - # dd['item_index'].append(k2) - # dd['question'].append(q.name) - # dd['item'].append(item.name) - # dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol) - # error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed) - # - # qstats = report.wdir + "/" + report.name + ".xlsx" - # - # if os.path.isfile(qstats): - # d_read = pd.read_excel(qstats).to_dict() - # else: - # d_read = dict() - # - # for k in range(1000): - # key = 'run_'+str(k) - # if key in d_read: - # dd[key] = list(d_read['run_0'].values()) - # else: - # dd[key] = error_computed - # break - # - # workbook = Workbook() - # worksheet = workbook.active - # for col, key in enumerate(dd.keys()): - # worksheet.cell(row=1, column=col+1).value = key - # for row, item in enumerate(dd[key]): - # worksheet.cell(row=row+2, column=col+1).value = item - # - # workbook.save(qstats) - # workbook.close() - # - # except ModuleNotFoundError as e: - # s = 234 - # pass - if question is None: print("Provisional evaluation") tabulate(table_data) @@ -161,24 +110,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # 
q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -188,104 +133,28 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite) - z = 234 - # for j, item in enumerate(q.items): - # if qitem is not None and question is not None and j+1 != qitem: - # continue - # - # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles. - # # if not item.question.has_called_init_: - # start = time.time() - # - # cc = None - # if show_progress_bar: - # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] ) - # cc = ActiveProgress(t=total_estimated_time, title=q_title_print) - # from unitgrade import Capturing # DON'T REMOVE THIS LINE - # with eval('Capturing')(unmute=unmute): # Clunky import syntax is required bc. of minify issue. - # try: - # for q2 in q_with_outstanding_init: - # q2.init() - # q2.has_called_init_ = True - # - # # item.question.init() # Initialize the question. Useful for sharing resources. - # except Exception as e: - # if not passall: - # if not silent: - # print(" ") - # print("="*30) - # print(f"When initializing question {q.title} the initialization code threw an error") - # print(e) - # print("The remaining parts of this question will likely fail.") - # print("="*30) - # - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(q_title_print, end="") - # - # q_time =np.round( time.time()-start, 2) - # - # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "") - # print("=" * nL) - # q_with_outstanding_init = None - # - # # item.question = q # Set the parent question instance for later reference. - # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title) - # - # if show_progress_bar: - # cc = ActiveProgress(t=item.estimated_time, title=item_title_print) - # else: - # print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="") - # hidden = issubclass(item.__class__, Hidden) - # # if not hidden: - # # print(ss, end="") - # # sys.stdout.flush() - # start = time.time() - # - # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent) - # q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title} - # tsecs = np.round(time.time()-start, 2) - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(item_title_print + ('.' 
* max(0, nL - 4 - len(ss))), end="") - # - # if not hidden: - # ss = "PASS" if current == possible else "*** FAILED" - # if tsecs >= 0.1: - # ss += " ("+ str(tsecs) + " seconds)" - # print(ss) - - # ws, possible, obtained = upack(q_) possible = res.testsRun obtained = len(res.successes) assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f"Question {n+1} total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -300,15 +169,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -331,7 +201,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -352,7 +223,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -396,14 +267,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... 
sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -416,12 +287,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -440,9 +314,9 @@ def gather_upload_to_campusnet(report, output_dir=None): if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -455,8 +329,8 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > 
tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n 
super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport random\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n\n def test_add(self):\n """ Docstring for this method """\n from cs102.homework1 import add\n self.assertEqualC(add(2,2))\n self.assertEqualC(add(-100, 5))\n\n def test_reverse(self):\n """ Reverse a list """ # Add a title to the test.\n from cs102.homework1 import reverse_list\n self.assertEqualC(reverse_list([1,2,3]))\n\n\nclass Question2(UTestCase):\n """ Second problem """\n @cache\n def my_reversal(self, ls):\n # The \'@cache\' decorator ensures the function is not run on the *students* computer\n # Instead the code is run on the teachers computer and the result is passed on with the\n # other pre-computed results -- i.e. this function will run regardless of how the student happens to have\n # implemented reverse_list.\n from cs102.homework1 import reverse_list\n return reverse_list(ls)\n\n def test_reverse_tricky(self):\n ls = [2,4,8]\n self.title = f"Reversing a small list containing {ls=}"\n ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result.\n ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments.\n self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code.\n\n\nimport cs102\nclass Report2(Report):\n title = "CS 101 Report 2"\n questions = [(Week1, 10), (Question2, 8) ] # Include a single question for 10 credits.\n pack_imports = [cs102]' -report1_payload = '8004959a010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c057469746c659486948c19446f63737472696e6720666f722074686973206d6574686f64946803680486948c066173736572749486947d94284b004b044b014aa1ffffff756803680486948c0474696d6594869447000000000000000068038c0c746573745f72657665727365948694680686948c0e526576657273652061206c69737494680368108694680a86947d944b005d94284b034b024b016573680368108694680e86944700000000000000008c0474696d6594470000000000000000758c095175657374696f6e32947d94288c095175657374696f6e32948c13746573745f726576657273655f747269636b799486948c066173736572749486947d944b005d94284b024b044b086573681d681e86948c057469746c659486948c2e526576657273696e67206120736d616c6c206c69737420636f6e7461696e696e67206c733d5b322c20342c20385d94681d681e86948c0474696d65948694470000000000000000681a473f5066000000000075752e' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return 
root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n 
q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n 
self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\n\nclass Week1(UTestCase):\n """ The first question for week 1. 
"""\n def test_add(self):\n """ Docstring for this method """\n from cs102.homework1 import add\n self.assertEqualC(add(2,2))\n with self.capture() as out:\n print("hello world 42")\n self.assertEqual(out.numbers[0], 42)\n self.assertEqualC(add(-100, 5))\n\n def test_reverse(self):\n """ Reverse a list """ # Add a title to the test.\n from cs102.homework1 import reverse_list\n self.assertEqualC(reverse_list([1,2,3]))\n\n\nclass Question2(UTestCase):\n """ Second problem """\n @cache\n def my_reversal(self, ls):\n # The \'@cache\' decorator ensures the function is not run on the *students* computer\n # Instead the code is run on the teachers computer and the result is passed on with the\n # other pre-computed results -- i.e. this function will run regardless of how the student happens to have\n # implemented reverse_list.\n from cs102.homework1 import reverse_list\n return reverse_list(ls)\n\n def test_reverse_tricky(self):\n ls = [2,4,8]\n self.title = f"Reversing a small list containing {ls=}" # Titles can be set like this at any point in the function body.\n ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result.\n ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments.\n self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code.\n\n\nimport cs102\nclass Report2(Report):\n title = "CS 101 Report 2"\n questions = [(Week1, 10), (Question2, 8) ]\n pack_imports = [cs102]' +report1_payload = '80049510010000000000007d94288c055765656b31947d94288c055765656b31948c08746573745f6164649486948c066173736572749486947d94284b004b044b014aa1ffffff7568038c0c746573745f72657665727365948694680686947d944b005d94284b034b024b0165738c0474696d6594473fe6a7e700000000758c095175657374696f6e32947d94288c095175657374696f6e32948c13746573745f726576657273655f747269636b799486948c057469746c659486948c2e526576657273696e67206120736d616c6c206c69737420636f6e7461696e696e67206c733d5b322c20342c20385d946811681286948c066173736572749486947d944b005d94284b024b044b086573680e473facac280000000075752e' name="Report2" report = source_instantiate(name, report1_source, report1_payload) diff --git a/examples/example_framework/students/cs102/unitgrade/Question2.pkl b/examples/example_framework/students/cs102/unitgrade/Question2.pkl index 634a7fbbe4bad27f24b2d894ef3c0b37c4f5dd94..b950c49faa2b91b7675ee266d63a13fe3652e3cc 100644 Binary files a/examples/example_framework/students/cs102/unitgrade/Question2.pkl and b/examples/example_framework/students/cs102/unitgrade/Question2.pkl differ diff --git a/examples/example_framework/students/cs102/unitgrade/Week1.pkl b/examples/example_framework/students/cs102/unitgrade/Week1.pkl index 7912698f036128bb2a8b616c2a66c58ac9e774e5..6b4ee502e9c67e2ff3aba1b11c4911bb88195e34 100644 Binary files a/examples/example_framework/students/cs102/unitgrade/Week1.pkl and b/examples/example_framework/students/cs102/unitgrade/Week1.pkl differ diff --git a/examples/example_jupyter/instructor/cs105/.coverage b/examples/example_jupyter/instructor/cs105/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..572452543e2092b38f99ec9afedd779fdc155f29 Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/.coverage differ diff --git a/examples/example_jupyter/instructor/cs105/Report1Jupyter_handin_18_of_18.token b/examples/example_jupyter/instructor/cs105/Report1Jupyter_handin_18_of_18.token new file mode 100644 index 0000000000000000000000000000000000000000..cfb09b6ef919a3d4c721651cb236201490461fc4 
Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/Report1Jupyter_handin_18_of_18.token differ diff --git a/examples/example_jupyter/instructor/cs105/__pycache__/homework1.cpython-38.pyc b/examples/example_jupyter/instructor/cs105/__pycache__/homework1.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405a1c978c8bf2114f9b470bfd5312fb8f02188f Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_jupyter/instructor/cs105/__pycache__/report5.cpython-38.pyc b/examples/example_jupyter/instructor/cs105/__pycache__/report5.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c80ebb8aac0fa657aacc6cb4b52f65783b2e1ec8 Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/__pycache__/report5.cpython-38.pyc differ diff --git a/examples/example_jupyter/instructor/cs105/__pycache__/week2.cpython-38.pyc b/examples/example_jupyter/instructor/cs105/__pycache__/week2.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d81c0675c7056e7e11ab8b96d289c407997be566 Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/__pycache__/week2.cpython-38.pyc differ diff --git a/examples/example_jupyter/instructor/cs105/deploy.py b/examples/example_jupyter/instructor/cs105/deploy.py new file mode 100644 index 0000000000000000000000000000000000000000..8acafa93b570e43f9e8315b08d76f472852d7c0e --- /dev/null +++ b/examples/example_jupyter/instructor/cs105/deploy.py @@ -0,0 +1,15 @@ +from report5 import Report1Jupyter +from unitgrade_private2.hidden_create_files import setup_grade_file_report +from snipper import snip_dir + +if __name__ == "__main__": + setup_grade_file_report(Report1Jupyter, minify=False, obfuscate=False, execute=False) + + # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet + # gather_upload_to_campusnet((Report1Flat())) + + # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper + snip_dir.snip_dir(source_dir="", dest_dir="../../students/cs105", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) + + + diff --git a/examples/example_jupyter/instructor/cs105/homework1.py b/examples/example_jupyter/instructor/cs105/homework1.py new file mode 100644 index 0000000000000000000000000000000000000000..b01dcabd994961e7afe11c436c988675799c11d1 --- /dev/null +++ b/examples/example_jupyter/instructor/cs105/homework1.py @@ -0,0 +1 @@ +# This file is blank. diff --git a/examples/example_jupyter/instructor/cs105/report5.py b/examples/example_jupyter/instructor/cs105/report5.py new file mode 100644 index 0000000000000000000000000000000000000000..38e9fbdbc8ba6acc50d215293a7fe3c996e34c75 --- /dev/null +++ b/examples/example_jupyter/instructor/cs105/report5.py @@ -0,0 +1,49 @@ +from src.unitgrade2.unitgrade2 import Report, UTestCase +from src.unitgrade2 import evaluate_report_student +import homework1 +import importnb +from src.unitgrade2.unitgrade2 import Capturing2 + +file = 'week2.ipynb' +class Week1(UTestCase): + @classmethod + def setUpClass(cls) -> None: + with Capturing2(): + cls.nb = importnb.Notebook.load(file) + + def test_add(self): + self.assertEqual(Week1.nb.myfun(2,2), 4) + self.assertEqual(Week1.nb.myfun(2,4), 8) + + def test_reverse(self): + self.assertEqual(Week1.nb.var, "hello world 2") + +# Nicer: Automatically load the notebook. 
+class NBTestCase(UTestCase): + notebook = None + _nb = None + @classmethod + def setUpClass(cls) -> None: + with Capturing2(): + cls._nb = importnb.Notebook.load(cls.notebook) + + @property + def nb(self): + return self.__class__._nb + +class Question2(NBTestCase): + notebook = "week2.ipynb" + def test_add(self): + self.assertEqualC(self.nb.myfun(2,8)) + +class Report1Jupyter(Report): + title = "CS 105 Report 5" + questions = [(Week1, 10), + (Question2, 8) + ] # Include a single question for 10 credits. + pack_imports = [homework1] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Jupyter()) diff --git a/examples/example_jupyter/instructor/cs105/report5_grade.py b/examples/example_jupyter/instructor/cs105/report5_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..3e651780d168dfbed2ec11d73bcaf978942b53c8 --- /dev/null +++ b/examples/example_jupyter/instructor/cs105/report5_grade.py @@ -0,0 +1,336 @@ + +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. \nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing 
cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# 
classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport homework1\nimport importnb\n\nfile = \'week2.ipynb\'\nclass Week1(UTestCase):\n @classmethod\n def setUpClass(cls) -> None:\n with Capturing2():\n cls.nb = importnb.Notebook.load(file)\n\n def test_add(self):\n self.assertEqual(Week1.nb.myfun(2,2), 4)\n self.assertEqual(Week1.nb.myfun(2,4), 8)\n\n def test_reverse(self):\n self.assertEqual(Week1.nb.var, "hello world 2")\n\n# Nicer: Automatically load the notebook.\nclass NBTestCase(UTestCase):\n notebook = None\n _nb = 
None\n @classmethod\n def setUpClass(cls) -> None:\n with Capturing2():\n cls._nb = importnb.Notebook.load(cls.notebook)\n\n @property\n def nb(self):\n return self.__class__._nb\n\nclass Question2(NBTestCase):\n notebook = "week2.ipynb"\n def test_add(self):\n self.assertEqualC(self.nb.myfun(2,8))\n\nclass Report1Jupyter(Report):\n title = "CS 105 Report 5"\n questions = [(Week1, 10),\n (Question2, 8)\n ] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_payload = '8004955c000000000000007d94288c055765656b31947d948c0474696d6594473fed915600000000738c095175657374696f6e32947d942868048c08746573745f6164649486948c066173736572749486947d944b004b10736803473fcc28f40000000075752e' +name="Report1Jupyter" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) \ No newline at end of file diff --git a/examples/example_jupyter/instructor/cs105/unitgrade/Question2.pkl b/examples/example_jupyter/instructor/cs105/unitgrade/Question2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b08cef102cf660c34ffd24ee8633b7254630acac Binary files /dev/null and b/examples/example_jupyter/instructor/cs105/unitgrade/Question2.pkl differ diff --git a/examples/example_jupyter/instructor/cs105/unitgrade/Week1.pkl b/examples/example_jupyter/instructor/cs105/unitgrade/Week1.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9b6ff7ac689837f86e1b0e393993ec7acbb784e8 --- /dev/null +++ b/examples/example_jupyter/instructor/cs105/unitgrade/Week1.pkl @@ -0,0 +1 @@ +€N. \ No newline at end of file diff --git a/examples/02471/instructor/02471/week02/week2.ipynb b/examples/example_jupyter/instructor/cs105/week2.ipynb similarity index 100% rename from examples/02471/instructor/02471/week02/week2.ipynb rename to examples/example_jupyter/instructor/cs105/week2.ipynb diff --git a/examples/example_jupyter/students/cs105/.coverage b/examples/example_jupyter/students/cs105/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..572452543e2092b38f99ec9afedd779fdc155f29 Binary files /dev/null and b/examples/example_jupyter/students/cs105/.coverage differ diff --git a/examples/example_jupyter/students/cs105/__pycache__/homework1.cpython-38.pyc b/examples/example_jupyter/students/cs105/__pycache__/homework1.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405a1c978c8bf2114f9b470bfd5312fb8f02188f Binary files /dev/null and b/examples/example_jupyter/students/cs105/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_jupyter/students/cs105/__pycache__/report5.cpython-38.pyc b/examples/example_jupyter/students/cs105/__pycache__/report5.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c80ebb8aac0fa657aacc6cb4b52f65783b2e1ec8 Binary files /dev/null and b/examples/example_jupyter/students/cs105/__pycache__/report5.cpython-38.pyc differ diff --git a/examples/example_jupyter/students/cs105/__pycache__/week2.cpython-38.pyc b/examples/example_jupyter/students/cs105/__pycache__/week2.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d81c0675c7056e7e11ab8b96d289c407997be566 Binary files /dev/null and b/examples/example_jupyter/students/cs105/__pycache__/week2.cpython-38.pyc differ diff --git a/examples/example_jupyter/students/cs105/homework1.py b/examples/example_jupyter/students/cs105/homework1.py new file mode 100644 index 
0000000000000000000000000000000000000000..80562c267caf49f1a23cf6f582366b052c39f1e8 --- /dev/null +++ b/examples/example_jupyter/students/cs105/homework1.py @@ -0,0 +1,4 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +# This file is blank. diff --git a/examples/example_jupyter/students/cs105/report5.py b/examples/example_jupyter/students/cs105/report5.py new file mode 100644 index 0000000000000000000000000000000000000000..e91cba1106903a2126ff3378d2c313e3460ab84c --- /dev/null +++ b/examples/example_jupyter/students/cs105/report5.py @@ -0,0 +1,52 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +from src.unitgrade2.unitgrade2 import Report, UTestCase +from src.unitgrade2 import evaluate_report_student +import homework1 +import importnb +from src.unitgrade2.unitgrade2 import Capturing2 + +file = 'week2.ipynb' +class Week1(UTestCase): + @classmethod + def setUpClass(cls) -> None: + with Capturing2(): + cls.nb = importnb.Notebook.load(file) + + def test_add(self): + self.assertEqual(Week1.nb.myfun(2,2), 4) + self.assertEqual(Week1.nb.myfun(2,4), 8) + + def test_reverse(self): + self.assertEqual(Week1.nb.var, "hello world 2") + +# Nicer: Automatically load the notebook. +class NBTestCase(UTestCase): + notebook = None + _nb = None + @classmethod + def setUpClass(cls) -> None: + with Capturing2(): + cls._nb = importnb.Notebook.load(cls.notebook) + + @property + def nb(self): + return self.__class__._nb + +class Question2(NBTestCase): + notebook = "week2.ipynb" + def test_add(self): + self.assertEqualC(self.nb.myfun(2,8)) + +class Report1Jupyter(Report): + title = "CS 105 Report 5" + questions = [(Week1, 10), + (Question2, 8) + ] # Include a single question for 10 credits. + pack_imports = [homework1] + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1Jupyter()) diff --git a/examples/example_jupyter/students/cs105/report5_grade.py b/examples/example_jupyter/students/cs105/report5_grade.py new file mode 100644 index 0000000000000000000000000000000000000000..b059908becff8c87692c95e7f6116ec78b8531eb --- /dev/null +++ b/examples/example_jupyter/students/cs105/report5_grade.py @@ -0,0 +1,338 @@ +""" +Example student code. This file is automatically generated from the files in the instructor-directory +""" +import numpy as np +from tabulate import tabulate +from datetime import datetime +import pyfiglet +import unittest +import inspect +import os +import argparse +import time + +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: +To run all tests in a report: + +> python assignment1_dp.py + +To run only question 2 or question 2.1 + +> python assignment1_dp.py -q 2 +> python assignment1_dp.py -q 2.1 + +Note this scripts does not grade your report. To grade your report, use: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. 
+For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)') +parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result') +parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes') +parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code') +parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.') + +def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False): + args = parser.parse_args() + if question is None and args.q is not None: + question = args.q + if "." in question: + question, qitem = [int(v) for v in question.split(".")] + else: + question = int(question) + + if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file: + raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation") + + if unmute is None: + unmute = args.unmute + if passall is None: + passall = args.passall + + results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute, + show_tol_err=show_tol_err) + + + if question is None: + print("Provisional evaluation") + tabulate(table_data) + table = table_data + print(tabulate(table)) + print(" ") + + fr = inspect.getouterframes(inspect.currentframe())[1].filename + gfile = os.path.basename(fr)[:-3] + "_grade.py" + if os.path.exists(gfile): + print("Note your results have not yet been registered. 
\nTo register your results, please run the file:") + print(">>>", gfile) + print("In the same manner as you ran this file.") + + + return results + + +def upack(q): + # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()]) + h =[(i['w'], i['possible'], i['obtained']) for i in q.values()] + h = np.asarray(h) + return h[:,0], h[:,1], h[:,2], + +class UnitgradeTextRunner(unittest.TextTestRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + +class SequentialTestLoader(unittest.TestLoader): + def getTestCaseNames(self, testCaseClass): + test_names = super().getTestCaseNames(testCaseClass) + # testcase_methods = list(testCaseClass.__dict__.keys()) + ls = [] + for C in testCaseClass.mro(): + if issubclass(C, unittest.TestCase): + ls = list(C.__dict__.keys()) + ls + testcase_methods = ls + test_names.sort(key=testcase_methods.index) + return test_names + +def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False, + show_progress_bar=True, + show_tol_err=False, + big_header=True): + + now = datetime.now() + if big_header: + ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom") + b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) + else: + b = "Unitgrade" + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) + s = report.title + if hasattr(report, "version") and report.version is not None: + s += " version " + report.version + print(s, "(use --help for options)" if show_help_flag else "") + # print(f"Loaded answers from: ", report.computed_answers_file, "\n") + table_data = [] + t_start = time.time() + score = {} + loader = SequentialTestLoader() + + for n, (q, w) in enumerate(report.questions): + if question is not None and n+1 != question: + continue + suite = loader.loadTestsFromTestCase(q) + qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__ + q_title_print = "Question %i: %s"%(n+1, qtitle) + print(q_title_print, end="") + q.possible = 0 + q.obtained = 0 + q_ = {} # Gather score in this class. + UTextResult.q_title_print = q_title_print # Hacky + UTextResult.show_progress_bar = show_progress_bar # Hacky. 
+ UTextResult.number = n + UTextResult.nL = report.nL + + res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) + + possible = res.testsRun + obtained = len(res.successes) + + assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun + + obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 + score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} + q.obtained = obtained + q.possible = possible + + s1 = f" * q{n+1}) Total" + s2 = f" {q.obtained}/{w}" + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) + print(" ") + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) + + ws, possible, obtained = upack(score) + possible = int( msum(possible) ) + obtained = int( msum(obtained) ) # Cast to python int + report.possible = possible + report.obtained = obtained + now = datetime.now() + dt_string = now.strftime("%H:%M:%S") + + dt = int(time.time()-t_start) + minutes = dt//60 + seconds = dt - minutes*60 + plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") + + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") + + table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) + results = {'total': (obtained, possible), 'details': score} + return results, table_data + + +from tabulate import tabulate +from datetime import datetime +import inspect +import json +import os +import bz2 +import pickle +import os + +def bzwrite(json_str, token): # to get around obfuscation issues + with getattr(bz2, 'open')(token, "wt") as f: + f.write(json_str) + +def gather_imports(imp): + resources = {} + m = imp + # for m in pack_imports: + # print(f"*** {m.__name__}") + f = m.__file__ + # dn = os.path.dirname(f) + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = str(__import__(m.__name__.split('.')[0]).__path__) + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: + top_package = os.path.dirname(m.__file__) + module_import = True + else: + top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False + + # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) + # top_package = os.path.dirname(top_package) + import zipfile + # import strea + # zipfile.ZipFile + import io + # file_like_object = io.BytesIO(my_zip_data) + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w') as zip: + # zip.write() + for root, dirs, files in os.walk(top_package): + for file in files: + if file.endswith(".py"): + fpath = os.path.join(root, file) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) + zip.write(fpath, v) + + resources['zipfile'] = zip_buffer.getvalue() + resources['top_package'] = top_package + resources['module_import'] = module_import + return resources, top_package + + if f.endswith("__init__.py"): + for root, dirs, files in os.walk(os.path.dirname(f)): + for file in files: + if file.endswith(".py"): + # print(file) + # print() + v = os.path.relpath(os.path.join(root, file), top_package) + with open(os.path.join(root, file), 'r') as ff: + resources[v] = ff.read() + else: + v = os.path.relpath(f, 
top_package) + with open(f, 'r') as ff: + resources[v] = ff.read() + return resources + +import argparse +parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example: + +> python report1_grade.py + +Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful. +For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run: + +> python -m course_package.report1 + +see https://docs.python.org/3.9/using/cmdline.html +""", formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('--noprogress', action="store_true", help='Disable progress bars') +parser.add_argument('--autolab', action="store_true", help='Show Autolab results') + +def gather_upload_to_campusnet(report, output_dir=None): + n = report.nL + args = parser.parse_args() + results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, + show_progress_bar=not args.noprogress, + big_header=not args.autolab) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) + # also load the source code of missing files... + + sources = {} + print("") + if not args.autolab: + if len(report.individual_imports) > 0: + print("By uploading the .token file, you verify the files:") + for m in report.individual_imports: + print(">", m.__file__) + print("Are created/modified individually by you in agreement with DTUs exam rules") + report.pack_imports += report.individual_imports + + if len(report.pack_imports) > 0: + print("Including files in upload...") + for k, m in enumerate(report.pack_imports): + nimp, top_package = gather_imports(m) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import + nimp['name'] = m.__name__ + sources[k] = nimp + # if len([k for k in nimp if k not in sources]) > 0: + print(f" * {m.__name__}") + # sources = {**sources, **nimp} + results['sources'] = sources + + if output_dir is None: + output_dir = os.getcwd() + + payload_out_base = report.__class__.__name__ + "_handin" + + obtain, possible = results['total'] + vstring = "_v"+report.version if report.version is not None else "" + + token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) + token = os.path.join(output_dir, token) + with open(token, 'wb') as f: + pickle.dump(results, f) + + if not args.autolab: + print(" ") + print("To get credit for your results, please upload the single unmodified file: ") + print(">", token) + # print("To campusnet without any modifications.") + + # print("Now time for some autolab fun") + +def source_instantiate(name, report1_source, payload): + eval("exec")(report1_source, globals()) + pl = pickle.loads(bytes.fromhex(payload)) + report = eval(name)(payload=pl, strict=True) + # report.set_payload(pl) + return report + + + +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing 
cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# 
classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n # print(self.questions)\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n print("q is", q())\n q()._cache_put(\'time\', q.time) # = q.time\n report_cache[q.__qualname__] = q._cache2\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n # self._stdout = sys.stdout\n # sys._stdout = io.StringIO()\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = 
dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f" * q{n+1}) Total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nimport homework1\nimport importnb\n\nfile = \'week2.ipynb\'\nclass Week1(UTestCase):\n @classmethod\n def setUpClass(cls) -> None:\n with Capturing2():\n cls.nb = importnb.Notebook.load(file)\n\n def test_add(self):\n self.assertEqual(Week1.nb.myfun(2,2), 4)\n self.assertEqual(Week1.nb.myfun(2,4), 8)\n\n def test_reverse(self):\n self.assertEqual(Week1.nb.var, "hello world 2")\n\n# Nicer: Automatically load the notebook.\nclass NBTestCase(UTestCase):\n notebook = None\n _nb = 
None\n @classmethod\n def setUpClass(cls) -> None:\n with Capturing2():\n cls._nb = importnb.Notebook.load(cls.notebook)\n\n @property\n def nb(self):\n return self.__class__._nb\n\nclass Question2(NBTestCase):\n notebook = "week2.ipynb"\n def test_add(self):\n self.assertEqualC(self.nb.myfun(2,8))\n\nclass Report1Jupyter(Report):\n title = "CS 105 Report 5"\n questions = [(Week1, 10),\n (Question2, 8)\n ] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_payload = '8004955c000000000000007d94288c055765656b31947d948c0474696d6594473fed915600000000738c095175657374696f6e32947d942868048c08746573745f6164649486948c066173736572749486947d944b004b10736803473fcc28f40000000075752e' +name="Report1Jupyter" + +report = source_instantiate(name, report1_source, report1_payload) +output_dir = os.path.dirname(__file__) +gather_upload_to_campusnet(report, output_dir) diff --git a/examples/example_jupyter/students/cs105/unitgrade/Question2.pkl b/examples/example_jupyter/students/cs105/unitgrade/Question2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b08cef102cf660c34ffd24ee8633b7254630acac Binary files /dev/null and b/examples/example_jupyter/students/cs105/unitgrade/Question2.pkl differ diff --git a/examples/example_jupyter/students/cs105/unitgrade/Week1.pkl b/examples/example_jupyter/students/cs105/unitgrade/Week1.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9b6ff7ac689837f86e1b0e393993ec7acbb784e8 --- /dev/null +++ b/examples/example_jupyter/students/cs105/unitgrade/Week1.pkl @@ -0,0 +1 @@ +€N. \ No newline at end of file diff --git a/examples/example_jupyter/students/cs105/week2.ipynb b/examples/example_jupyter/students/cs105/week2.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6bc411a7d674f4520b58bb03f2d9f1abeb2f7476 --- /dev/null +++ b/examples/example_jupyter/students/cs105/week2.ipynb @@ -0,0 +1,69 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Exercise 2.2.1" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hello world\n", + "6\n" + ] + } + ], + "source": [ + "var = \"hello world 2\"\n", + "def myfun(a,b):\n", + " return a*b\n", + "\n", + "output = myfun(2,3) + 10\n", + "print(var)\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "z = 234 \n", + "def mymul(d):\n", + " return myfun(d,2)+1" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token b/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token index 5d34d0fd91b71eecb9ff050591857b09f382421b..1884f587f74133902500c7b2e2ea1095e067afd8 100644 Binary files a/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token and b/examples/example_simplest/instructor/cs101/Report1_handin_10_of_10.token differ diff --git a/examples/example_simplest/instructor/cs101/__pycache__/deploy.cpython-38.pyc 
b/examples/example_simplest/instructor/cs101/__pycache__/deploy.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e14e2ac2aed2a082d17689ac69828e53aeba409
Binary files /dev/null and b/examples/example_simplest/instructor/cs101/__pycache__/deploy.cpython-38.pyc differ
diff --git a/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-38.pyc b/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-38.pyc
index 3b0930bdcddcda845eef8dfac07d6ab732a2473f..aed06f7cf667b51f32903bf52b3c17138bda2777 100644
Binary files a/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-38.pyc and b/examples/example_simplest/instructor/cs101/__pycache__/report1_grade.cpython-38.pyc differ
diff --git a/examples/example_simplest/instructor/cs101/deploy.py b/examples/example_simplest/instructor/cs101/deploy.py
index 3e9682d9aa6d9ffce1501d7826a8bd126779d75c..b51f79c1b3be32270a83a768a19602e1ffca67b9 100644
--- a/examples/example_simplest/instructor/cs101/deploy.py
+++ b/examples/example_simplest/instructor/cs101/deploy.py
@@ -1,16 +1,19 @@
-from report1 import Report1
+from cs101.report1 import Report1
 from unitgrade_private2.hidden_create_files import setup_grade_file_report
 from snipper import snip_dir
-import shutil
+import shutil, os
+wd = os.path.dirname(__file__)
 
 if __name__ == "__main__":
-    setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False)
+    setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False, bzip=False)
 
     # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper
-    snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
+    snip_dir.snip_dir(source_dir=wd+"/../cs101", dest_dir=wd+"/../../students/cs101", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py'])
 
-    # For my own sake, copy the homework to the other examples.
-    for f in ['../../../example_framework/instructor/cs102/homework1.py', '../../../example_docker/instructor/cs103/homework1.py']:
-        shutil.copy('homework1.py', f)
+    # For my own sake, copy the homework file to the other examples.
+    for f in ['../../../example_framework/instructor/cs102/homework1.py',
+              '../../../example_docker/instructor/cs103/homework1.py',
+              '../../../example_flat/instructor/cs101flat/homework1.py']:
+        shutil.copy(wd+'/homework1.py', wd+"/"+f)
diff --git a/examples/example_simplest/instructor/cs101/report1.py b/examples/example_simplest/instructor/cs101/report1.py
index e00853f3f03381b3f1879db5c217fee7c3ff0279..447cedb38763aa72d45a932719922815b14e7907 100644
--- a/examples/example_simplest/instructor/cs101/report1.py
+++ b/examples/example_simplest/instructor/cs101/report1.py
@@ -1,5 +1,5 @@
-from unitgrade2.unitgrade2 import Report
-from unitgrade2.unitgrade_helpers2 import evaluate_report_student
+from src.unitgrade2.unitgrade2 import Report
+from src.unitgrade2 import evaluate_report_student
 from cs101.homework1 import reverse_list, add
 import unittest
 
diff --git a/examples/example_simplest/instructor/cs101/report1_grade.py b/examples/example_simplest/instructor/cs101/report1_grade.py
index 8972ab5fd7d427147f65315d2b2b87f6dee0f6fb..7f2feaaa6268c82e0d6c7d94ea8b5927d26e9b42 100644
--- a/examples/example_simplest/instructor/cs101/report1_grade.py
+++ b/examples/example_simplest/instructor/cs101/report1_grade.py
@@ -4,14 +4,10 @@ from tabulate import tabulate
 from datetime import datetime
 import pyfiglet
 import unittest
-
 import inspect
 import os
 import argparse
-import sys
 import time
-import threading # don't import Thread bc. of minify issue.
-import tqdm # don't do from tqdm import tqdm because of minify-issue
 
 parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example:
 To run all tests in a report:
@@ -61,53 +57,6 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass
                                           show_tol_err=show_tol_err)
 
-    # try: # For registering stats.
- # import unitgrade_private - # import irlc.lectures - # import xlwings - # from openpyxl import Workbook - # import pandas as pd - # from collections import defaultdict - # dd = defaultdict(lambda: []) - # error_computed = [] - # for k1, (q, _) in enumerate(report.questions): - # for k2, item in enumerate(q.items): - # dd['question_index'].append(k1) - # dd['item_index'].append(k2) - # dd['question'].append(q.name) - # dd['item'].append(item.name) - # dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol) - # error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed) - # - # qstats = report.wdir + "/" + report.name + ".xlsx" - # - # if os.path.isfile(qstats): - # d_read = pd.read_excel(qstats).to_dict() - # else: - # d_read = dict() - # - # for k in range(1000): - # key = 'run_'+str(k) - # if key in d_read: - # dd[key] = list(d_read['run_0'].values()) - # else: - # dd[key] = error_computed - # break - # - # workbook = Workbook() - # worksheet = workbook.active - # for col, key in enumerate(dd.keys()): - # worksheet.cell(row=1, column=col+1).value = key - # for row, item in enumerate(dd[key]): - # worksheet.cell(row=row+2, column=col+1).value = item - # - # workbook.save(qstats) - # workbook.close() - # - # except ModuleNotFoundError as e: - # s = 234 - # pass - if question is None: print("Provisional evaluation") tabulate(table_data) @@ -159,24 +108,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -186,104 +131,28 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite) - z = 234 - # for j, item in enumerate(q.items): - # if qitem is not None and question is not None and j+1 != qitem: - # continue - # - # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles. - # # if not item.question.has_called_init_: - # start = time.time() - # - # cc = None - # if show_progress_bar: - # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. 
# sum( [q2.estimated_time for q2 in q_with_outstanding_init] ) - # cc = ActiveProgress(t=total_estimated_time, title=q_title_print) - # from unitgrade import Capturing # DON'T REMOVE THIS LINE - # with eval('Capturing')(unmute=unmute): # Clunky import syntax is required bc. of minify issue. - # try: - # for q2 in q_with_outstanding_init: - # q2.init() - # q2.has_called_init_ = True - # - # # item.question.init() # Initialize the question. Useful for sharing resources. - # except Exception as e: - # if not passall: - # if not silent: - # print(" ") - # print("="*30) - # print(f"When initializing question {q.title} the initialization code threw an error") - # print(e) - # print("The remaining parts of this question will likely fail.") - # print("="*30) - # - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(q_title_print, end="") - # - # q_time =np.round( time.time()-start, 2) - # - # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "") - # print("=" * nL) - # q_with_outstanding_init = None - # - # # item.question = q # Set the parent question instance for later reference. - # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title) - # - # if show_progress_bar: - # cc = ActiveProgress(t=item.estimated_time, title=item_title_print) - # else: - # print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="") - # hidden = issubclass(item.__class__, Hidden) - # # if not hidden: - # # print(ss, end="") - # # sys.stdout.flush() - # start = time.time() - # - # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent) - # q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title} - # tsecs = np.round(time.time()-start, 2) - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(item_title_print + ('.' 
* max(0, nL - 4 - len(ss))), end="") - # - # if not hidden: - # ss = "PASS" if current == possible else "*** FAILED" - # if tsecs >= 0.1: - # ss += " ("+ str(tsecs) + " seconds)" - # print(ss) - - # ws, possible, obtained = upack(q_) possible = res.testsRun obtained = len(res.successes) assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f"Question {n+1} total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -298,15 +167,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -329,7 +199,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -350,7 +221,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -394,14 +265,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... 
sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -414,12 +285,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -438,9 +312,9 @@ def gather_upload_to_campusnet(report, output_dir=None): if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -453,7 +327,7 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > 
tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n 
super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n # print("Bad output\\n\\n")\n\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n 
dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, 
classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n 
self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' report1_payload = 
'8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' name="Report1" diff --git a/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token b/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token deleted file mode 100644 index 5ccd4e5495ad2bbfe4c3cc09832916b356f0d31b..0000000000000000000000000000000000000000 Binary files a/examples/example_simplest/students/cs101/Report1_handin_0_of_10.token and /dev/null differ diff --git a/examples/example_simplest/students/cs101/__pycache__/deploy.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/deploy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e14e2ac2aed2a082d17689ac69828e53aeba409 Binary files /dev/null and b/examples/example_simplest/students/cs101/__pycache__/deploy.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc index 1149b3ace2379ce11502c3e4633382e7f8d4950a..7dbbdac7ae33a6d3d1764a740a5f47ff3e00b252 100644 Binary files a/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc and b/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc index d7c0fcd96cc7cec28f9155fb61bec8c3bb6e00df..83e9e30d8000e629914b0160acf822078d8a3b78 100644 Binary files a/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc and b/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/__pycache__/report1_grade.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/report1_grade.cpython-38.pyc index 0aeda2d66a25eaae02e31a9c629a9a2887fb7dbd..aed06f7cf667b51f32903bf52b3c17138bda2777 100644 Binary files a/examples/example_simplest/students/cs101/__pycache__/report1_grade.cpython-38.pyc and b/examples/example_simplest/students/cs101/__pycache__/report1_grade.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/report1.py b/examples/example_simplest/students/cs101/report1.py index 8e5dfca2a42c1fcae00f20cd81d3a64221b85331..9e9fce79508076cb96a6e2ae2d864edc882adb3d 100644 --- a/examples/example_simplest/students/cs101/report1.py +++ b/examples/example_simplest/students/cs101/report1.py @@ -1,8 +1,8 @@ """ Example student code. 
This file is automatically generated from the files in the instructor-directory """ -from unitgrade2.unitgrade2 import Report -from unitgrade2.unitgrade_helpers2 import evaluate_report_student +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student from cs101.homework1 import reverse_list, add import unittest @@ -13,7 +13,6 @@ class Week1(unittest.TestCase): def test_reverse(self): self.assertEqual(reverse_list([1,2,3]), [3,2,1]) - # print("Bad output\n\n") import cs101 diff --git a/examples/example_simplest/students/cs101/report1_grade.py b/examples/example_simplest/students/cs101/report1_grade.py index efb1981670ec07e36b2088237d84a7406a9a507f..c244e7936e90875809dbfd58e7bb5f158ae07dd2 100644 --- a/examples/example_simplest/students/cs101/report1_grade.py +++ b/examples/example_simplest/students/cs101/report1_grade.py @@ -6,14 +6,10 @@ from tabulate import tabulate from datetime import datetime import pyfiglet import unittest - import inspect import os import argparse -import sys import time -import threading # don't import Thread bc. of minify issue. -import tqdm # don't do from tqdm import tqdm because of minify-issue parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: To run all tests in a report: @@ -63,53 +59,6 @@ def evaluate_report_student(report, question=None, qitem=None, unmute=None, pass show_tol_err=show_tol_err) - # try: # For registering stats. - # import unitgrade_private - # import irlc.lectures - # import xlwings - # from openpyxl import Workbook - # import pandas as pd - # from collections import defaultdict - # dd = defaultdict(lambda: []) - # error_computed = [] - # for k1, (q, _) in enumerate(report.questions): - # for k2, item in enumerate(q.items): - # dd['question_index'].append(k1) - # dd['item_index'].append(k2) - # dd['question'].append(q.name) - # dd['item'].append(item.name) - # dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol) - # error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed) - # - # qstats = report.wdir + "/" + report.name + ".xlsx" - # - # if os.path.isfile(qstats): - # d_read = pd.read_excel(qstats).to_dict() - # else: - # d_read = dict() - # - # for k in range(1000): - # key = 'run_'+str(k) - # if key in d_read: - # dd[key] = list(d_read['run_0'].values()) - # else: - # dd[key] = error_computed - # break - # - # workbook = Workbook() - # worksheet = workbook.active - # for col, key in enumerate(dd.keys()): - # worksheet.cell(row=1, column=col+1).value = key - # for row, item in enumerate(dd[key]): - # worksheet.cell(row=row+2, column=col+1).value = item - # - # workbook.save(qstats) - # workbook.close() - # - # except ModuleNotFoundError as e: - # s = 234 - # pass - if question is None: print("Provisional evaluation") tabulate(table_data) @@ -161,24 +110,20 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] ) else: b = "Unitgrade" - print(b + " v" + __version__) dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("Started: " + dt_string) + print(b + " v" + __version__ + ", started: " + dt_string+ "\n") + # print("Started: " + dt_string) s = report.title if hasattr(report, "version") and report.version is not None: s += " version " + report.version - print("Evaluating " + s, "(use --help for options)" if show_help_flag else "") + print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers 
from: ", report.computed_answers_file, "\n") table_data = [] - nL = 80 t_start = time.time() score = {} loader = SequentialTestLoader() for n, (q, w) in enumerate(report.questions): - # q = q() - # q_hidden = False - # q_hidden = issubclass(q.__class__, Hidden) if question is not None and n+1 != question: continue suite = loader.loadTestsFromTestCase(q) @@ -188,104 +133,28 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa q.possible = 0 q.obtained = 0 q_ = {} # Gather score in this class. - # unittest.Te - # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_] UTextResult.q_title_print = q_title_print # Hacky UTextResult.show_progress_bar = show_progress_bar # Hacky. UTextResult.number = n + UTextResult.nL = report.nL res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite) - # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite) - z = 234 - # for j, item in enumerate(q.items): - # if qitem is not None and question is not None and j+1 != qitem: - # continue - # - # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles. - # # if not item.question.has_called_init_: - # start = time.time() - # - # cc = None - # if show_progress_bar: - # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] ) - # cc = ActiveProgress(t=total_estimated_time, title=q_title_print) - # from unitgrade import Capturing # DON'T REMOVE THIS LINE - # with eval('Capturing')(unmute=unmute): # Clunky import syntax is required bc. of minify issue. - # try: - # for q2 in q_with_outstanding_init: - # q2.init() - # q2.has_called_init_ = True - # - # # item.question.init() # Initialize the question. Useful for sharing resources. - # except Exception as e: - # if not passall: - # if not silent: - # print(" ") - # print("="*30) - # print(f"When initializing question {q.title} the initialization code threw an error") - # print(e) - # print("The remaining parts of this question will likely fail.") - # print("="*30) - # - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(q_title_print, end="") - # - # q_time =np.round( time.time()-start, 2) - # - # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "") - # print("=" * nL) - # q_with_outstanding_init = None - # - # # item.question = q # Set the parent question instance for later reference. - # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title) - # - # if show_progress_bar: - # cc = ActiveProgress(t=item.estimated_time, title=item_title_print) - # else: - # print(item_title_print + ( '.'*max(0, nL-4-len(ss)) ), end="") - # hidden = issubclass(item.__class__, Hidden) - # # if not hidden: - # # print(ss, end="") - # # sys.stdout.flush() - # start = time.time() - # - # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent) - # q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title} - # tsecs = np.round(time.time()-start, 2) - # if show_progress_bar: - # cc.terminate() - # sys.stdout.flush() - # print(item_title_print + ('.' 
* max(0, nL - 4 - len(ss))), end="") - # - # if not hidden: - # ss = "PASS" if current == possible else "*** FAILED" - # if tsecs >= 0.1: - # ss += " ("+ str(tsecs) + " seconds)" - # print(ss) - - # ws, possible, obtained = upack(q_) possible = res.testsRun obtained = len(res.successes) assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun - # possible = int(ws @ possible) - # obtained = int(ws @ obtained) - # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0 - obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0 score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle} q.obtained = obtained q.possible = possible - s1 = f"*** Question q{n+1}" + s1 = f"Question {n+1} total" s2 = f" {q.obtained}/{w}" - print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 ) + print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 ) print(" ") - table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"]) + table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"]) ws, possible, obtained = upack(score) possible = int( msum(possible) ) @@ -300,15 +169,16 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa seconds = dt - minutes*60 plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "") - print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")") + dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")", + last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL) + + # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total") table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ]) results = {'total': (obtained, possible), 'details': score} return results, table_data - - from tabulate import tabulate from datetime import datetime import inspect @@ -331,7 +201,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) module_import = True else: @@ -352,7 +223,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -396,14 +267,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... 
sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -416,12 +287,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -440,9 +314,9 @@ def gather_upload_to_campusnet(report, output_dir=None): if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") @@ -455,7 +329,7 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\n# from . 
import cache_read\nimport unittest\nimport numpy as np\nimport sys\nfrom io import StringIO\nimport collections\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\nimport os\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > 
tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 80 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n root_dir = self.pack_imports[0].__path__._path[0]\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q,_) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar",show_progress_bar=True):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. 
")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n def __init__(self, stream, descriptions, verbosity):\n 
super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n # if hasattr(self, \'cc\'):\n # self.cc.terminate()\n # self.cc_terminate(success=False)\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n # if self.showAll:\n # self.stream.writeln("FAIL")\n # elif self.dots:\n # self.stream.write(\'F\')\n # self.stream.flush()\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n self.cc_terminate()\n\n\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(ss))), end="")\n # current = 1\n # possible = 1\n # current == possible\n ss = "PASS" if success else "FAILED"\n if tsecs >= 0.1:\n ss += " (" + str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # super().startTest(test)\n j =self.testsRun\n self.testsRun += 1\n # print("Starting the test...")\n # show_progress_bar = True\n n = UTextResult.number\n\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n\n item_title = test.shortDescription() # Better for printing (get from cache).\n # test.countTestCases()\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. 
See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n # print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)) )\n # key = (self.cache_id(), \'@cache\')\n # if self._cache_contains[key]\n\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. 
Ensures method always produce same result.\n _cache2 = None # User-written cache.\n\n @classmethod\n def question_title(cls):\n return cls.__doc__.splitlines()[0].strip() if cls.__doc__ != None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd == None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n # self._testMethodDoc.strip().splitlines()[0].strip()\n sd = self.shortDescriptionStandard()\n title = self._cache_get( (self.cache_id(), \'title\'), sd )\n return title if title != None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n # def _callSetUp(self):\n # # Always run before method is called.\n # print("asdf")\n # pass\n # @classmethod\n # def setUpClass(cls):\n # # self._cache_put((self.cache_id(), \'title\'), value)\n # cls.reset()\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc != None:\n # Ensure the cache is eventually updated with the right docstring.\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard() )\n # Fix temp cache here (for using the @cache decorator)\n self._cache2[ (self.cache_id(), \'assert\') ] = {}\n\n res = testMethod()\n elapsed = time.time() - t\n # self._cache_put( (self.cache_id(), \'title\'), self.shortDescription() )\n\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n # def unique_cache_id(self):\n # k0 = self.cache_id()\n # # key = ()\n # i = 0\n # for i in itertools.count():\n # # key = k0 + (i,)\n # if i not in self._cache_get( (k0, \'assert\') ):\n # break\n # return i\n # return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n # self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n #\n # def _cache2_contains(self, key):\n # print("Is this needed?")\n # self._ensure_cache_exists()\n # return key in self.__class__._cache2\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n cache = self._cache_get(key, {})\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, first)\n assert_fun(first, _expected, *args, **kwargs)\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n # print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n # q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. 
of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n # print("Bad output\\n\\n")\n\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n 
dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n"""\nimport numpy as np\nimport sys\nimport re\nimport threading\nimport tqdm\nimport pickle\nimport os\nfrom io import StringIO\nimport io\nfrom unittest.runner import _WritelnDecorator\nfrom typing import Any\nimport inspect\nimport textwrap\nimport colorama\nfrom colorama import Fore\nfrom functools import _make_key, RLock\nfrom collections import namedtuple\nimport unittest\nimport time\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ncolorama.init(autoreset=True) # auto resets your settings after every output\n\ndef gprint(s):\n print(f"{Fore.GREEN}{s}")\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\ndef setup_dir_by_class(C, base_dir):\n name = C.__class__.__name__\n return base_dir, name\n\n\nclass Logger(object):\n def __init__(self, buffer):\n assert False\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\n\nclass Capturing(list):\n def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n self._stdout = stdout\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout if self._stdout == None else self._stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n def __exit__(self, *args):\n lines = self._stringio.getvalue().splitlines()\n txt = "\\n".join(lines)\n numbers = extract_numbers(txt)\n self.extend(lines)\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n self.output = txt\n self.numbers = numbers\n\n\n# @classmethod\n# class OrderedClassMembers(type):\n# def __prepare__(self, name, bases):\n# assert False\n# return collections.OrderedDict()\n#\n# def __new__(self, name, bases, 
classdict):\n# ks = list(classdict.keys())\n# for b in bases:\n# ks += b.__ordered__\n# classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n# return type.__new__(self, name, bases, classdict)\n\n\nclass Report:\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n nL = 120 # Maximum line width\n\n @classmethod\n def reset(cls):\n for (q, _) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n @classmethod\n def mfile(clc):\n return inspect.getfile(clc)\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def _import_base_relative(self):\n if hasattr(self.pack_imports[0], \'__path__\'):\n root_dir = self.pack_imports[0].__path__._path[0]\n else:\n root_dir = self.pack_imports[0].__file__\n\n root_dir = os.path.dirname(root_dir)\n relative_path = os.path.relpath(self._file(), root_dir)\n modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n return root_dir, relative_path, modules\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n for (q, _) in self.questions:\n q.nL = self.nL # Set maximum line length.\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n loader = unittest.TestLoader()\n for q, _ in self.questions:\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self, with_coverage=False):\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = True\n q._report = self\n\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\': True}\n if with_coverage:\n for q, _ in self.questions:\n q._with_coverage = False\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct + 1)\n if i > 0 and l.find("|", i + 1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, too many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None):\n if file == None:\n file = sys.stdout\n self.file = file\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.01\n self.n = int(np.round(self.t / self.dt))\n self.show_progress_bar = show_progress_bar\n self.pbar = None\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n if self.show_progress_bar:\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n if not self._running:\n raise Exception("Stopping a stopped progress bar. ")\n self._running = False\n if self.show_progress_bar:\n self.thread.join()\n if self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar = None\n\n self.file.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n\n for _ in range(self.n - 1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n if file == None:\n file = sys.stdout\n\n # ss = self.item_title_print\n # state = "PASS" if success else "FAILED"\n dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n # if self.show_progress_bar or True:\n print(first + dot_parts, end="", file=file)\n # else:\n # print(dot_parts, end="", file=self.cc.file)\n last += extra\n # if tsecs >= 0.5:\n # state += " (" + str(tsecs) + " seconds)"\n print(last, file=file)\n\n\nclass UTextResult(unittest.TextTestResult):\n nL = 80\n number = -1 # HAcky way to set question number.\n show_progress_bar = True\n cc = None\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n def addError(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addFailure(self, test, err):\n super(unittest.TextTestResult, self).addFailure(test, err)\n self.cc_terminate(success=False)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n self.successes.append(test)\n self.cc_terminate()\n\n def cc_terminate(self, success=True):\n if self.show_progress_bar or True:\n tsecs = np.round(self.cc.terminate(), 2)\n self.cc.file.flush()\n ss = self.item_title_print\n\n state = "PASS" if success else "FAILED"\n\n dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n if self.show_progress_bar or True:\n print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n else:\n print(dot_parts, end="", file=self.cc.file)\n\n if tsecs >= 0.5:\n state += " (" + str(tsecs) + " seconds)"\n print(state, file=self.cc.file)\n\n def startTest(self, test):\n # j =self.testsRun\n self.testsRun += 1\n # item_title = self.getDescription(test)\n 
item_title = test.shortDescription() # Better for printing (get from cache).\n if item_title == None:\n # For unittest framework where getDescription may return None.\n item_title = self.getDescription(test)\n self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n estimated_time = 10\n if self.show_progress_bar or True:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar, file=sys.stdout)\n else:\n print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n self._stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n def stopTest(self, test):\n sys.stdout = self._stdout\n super().stopTest(test)\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 1\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print, show_progress_bar=self.show_progress_bar)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass is None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n if self.show_progress_bar:\n print(self.cc.title, end="")\n print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n stream = io.StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n # stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n\n return wrapper\n\n\ndef get_hints(ss):\n if ss == None:\n return None\n try:\n ss = textwrap.dedent(ss)\n ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n hints = ["hints:", ]\n j = np.argmax([ss.lower().find(h) for h in hints])\n h = hints[j]\n ss = ss[ss.find(h) + len(h) + 1:]\n ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n ss = textwrap.dedent(ss)\n ss = ss.strip()\n return ss\n except Exception as e:\n print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache. Ensures method always produce same result.\n _cache2 = None # User-written cache.\n _with_coverage = False\n _report = None # The report used. This is very, very hacky and should always be None. 
Don\'t rely on it!\n\n def capture(self):\n if hasattr(self, \'_stdout\') and self._stdout is not None:\n file = self._stdout\n else:\n file = sys.stdout\n return Capturing2(stdout=file)\n\n @classmethod\n def question_title(cls):\n """ Return the question title """\n return cls.__doc__.strip().splitlines()[0].strip() if cls.__doc__ is not None else cls.__qualname__\n\n @classmethod\n def reset(cls):\n print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _callSetUp(self):\n if self._with_coverage:\n if not hasattr(self._report, \'covcache\'):\n self._report.covcache = {}\n import coverage\n self.cov = coverage.Coverage()\n self.cov.start()\n self.setUp()\n\n def _callTearDown(self):\n self.tearDown()\n if self._with_coverage:\n from pathlib import Path\n from snipper import snipper\n self.cov.stop()\n data = self.cov.get_data()\n base, _, _ = self._report._import_base_relative()\n for file in data.measured_files():\n file = os.path.normpath(file)\n root = Path(base)\n child = Path(file)\n if root in child.parents:\n with open(child, \'r\') as f:\n s = f.read()\n lines = s.splitlines()\n garb = \'GARBAGE\'\n\n lines2 = snipper.censor_code(lines, keep=True)\n assert len(lines) == len(lines2)\n\n for l in data.contexts_by_lineno(file):\n if lines2[l].strip() == garb:\n if self.cache_id() not in self._report.covcache:\n self._report.covcache[self.cache_id()] = {}\n\n rel = os.path.relpath(child, root)\n cc = self._report.covcache[self.cache_id()]\n j = 0\n for j in range(l, -1, -1):\n if "def" in lines2[j] or "class" in lines2[j]:\n break\n from snipper.snipper import gcoms\n fun = lines2[j]\n comments, _ = gcoms("\\n".join(lines2[j:l]))\n if rel not in cc:\n cc[rel] = {}\n cc[rel][fun] = (l, "\\n".join(comments))\n self._cache_put((self.cache_id(), \'coverage\'), self._report.covcache)\n\n def shortDescriptionStandard(self):\n sd = super().shortDescription()\n if sd is None:\n sd = self._testMethodName\n return sd\n\n def shortDescription(self):\n sd = self.shortDescriptionStandard()\n title = self._cache_get((self.cache_id(), \'title\'), sd)\n return title if title is not None else sd\n\n @property\n def title(self):\n return self.shortDescription()\n\n @title.setter\n def title(self, value):\n self._cache_put((self.cache_id(), \'title\'), value)\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome is None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n self._ensure_cache_exists() # Make sure cache is there.\n if self._testMethodDoc is not None:\n self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n self._cache2[(self.cache_id(), \'assert\')] = {}\n res = testMethod()\n elapsed = time.time() - t\n self._get_outcome()[self.cache_id()] = res\n self._cache_put((self.cache_id(), "time"), elapsed)\n\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return c, m\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self._assert_cache_index = 0\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n 
self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def wrap_assert(self, assert_fun, first, *args, **kwargs):\n # sys.stdout = self._stdout\n key = (self.cache_id(), \'assert\')\n if not self._cache_contains(key):\n print("Warning, framework missing", key)\n self.__class__._cache[\n key] = {} # A new dict. We manually insert it because we have to use that the dict is mutable.\n cache = self._cache_get(key)\n id = self._assert_cache_index\n if not id in cache:\n print("Warning, framework missing cache index", key, "id =", id)\n _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n\n # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n cache[id] = first\n self._cache_put(key, cache)\n self._assert_cache_index += 1\n assert_fun(first, _expected, *args, **kwargs)\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n self.wrap_assert(self.assertEqual, first, msg)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__)) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache is not None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n if os.path.exists(cfile):\n try:\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n except Exception as e:\n print("Bad cache", cfile)\n print(e)\n else:\n print("Warning! data file not found", cfile)\n\n def _feedErrorsToResult(self, result, errors):\n """ Use this to show hints on test failure. """\n if not isinstance(result, UTextResult):\n er = [e for e, v in errors if v != None]\n\n if len(er) > 0:\n hints = []\n key = (self.cache_id(), \'coverage\')\n if self._cache_contains(key):\n CC = self._cache_get(key)\n for id in CC:\n if id == self.cache_id():\n cl, m = id\n gprint(f"> An error occured while solving: {cl}.{m}. 
The files/methods you need to edit are:") # For the test {id} in {file} you should edit:")\n for file in CC[id]:\n rec = CC[id][file]\n gprint(f"> * {file}")\n for l in rec:\n _, comments = CC[id][file][l]\n hint = get_hints(comments)\n\n if hint != None:\n hints.append(hint)\n gprint(f"> - {l}")\n\n er = er[0]\n doc = er._testMethodDoc\n if doc is not None:\n hint = get_hints(er._testMethodDoc)\n if hint is not None:\n hints = [hint] + hints\n if len(hints) > 0:\n gprint("> Hints:")\n gprint(textwrap.indent("\\n".join(hints), "> "))\n\n super()._feedErrorsToResult(result, errors)\n\n def startTestRun(self):\n # print("asdfsdaf 11", file=sys.stderr)\n super().startTestRun()\n # print("asdfsdaf")\n\n def _callTestMethod(self, method):\n # print("asdfsdaf")\n super()._callTestMethod(method)\n\n\ndef hide(func):\n return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n# 817\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\nclass SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n # testcase_methods = list(testCaseClass.__dict__.keys())\n ls = []\n for C in testCaseClass.mro():\n if issubclass(C, unittest.TestCase):\n ls = list(C.__dict__.keys()) + ls\n testcase_methods = ls\n test_names.sort(key=testcase_methods.index)\n return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False,\n big_header=True):\n\n now = datetime.now()\n if big_header:\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n else:\n b = "Unitgrade"\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n # print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print(s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n t_start = time.time()\n score = {}\n loader = SequentialTestLoader()\n\n for n, (q, w) in enumerate(report.questions):\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n UTextResult.q_title_print = q_title_print # Hacky\n UTextResult.show_progress_bar = show_progress_bar # Hacky.\n UTextResult.number = n\n UTextResult.nL = report.nL\n\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n\n possible = res.testsRun\n obtained = len(res.successes)\n\n assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun\n\n obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"Question {n+1} total"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ 
plrl(seconds, "second") +")",\n last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n\n if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'): # Importing a simple file: m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package)\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\', action="store_true", help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\', action="store_true", help=\'Show Autolab results\')\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = report.nL\n args = parser.parse_args()\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n show_progress_bar=not args.noprogress,\n big_header=not args.autolab)\n # print(" ")\n # print("="*n)\n # print("Final evaluation")\n # print(tabulate(table_data))\n # also load the source code of missing files...\n\n sources = {}\n print("")\n if not args.autolab:\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n _, report_relative_location, module_import = report._import_base_relative()\n\n # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'report_module_specification\'] = module_import\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f" * {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n if not args.autolab:\n print(" ")\n print("To get credit for your results, please upload the single unmodified file: ")\n print(">", token)\n # print("To campusnet without any modifications.")\n\n # print("Now time for some autolab fun")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom cs101.homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' report1_payload = 
'8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' name="Report1" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..b5a3c468d9e85e7fa7469c3a90d47b48ab93e54a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,6 @@ +[build-system] +requires = [ + "setuptools>=42", + "wheel" +] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/pytransform/__init__.py b/pytransform/__init__.py deleted file mode 100644 index f656a22d3028cd47aedc8a79e8c9607a847957ac..0000000000000000000000000000000000000000 --- a/pytransform/__init__.py +++ /dev/null @@ -1,454 +0,0 @@ -# These module alos are used by protection code, so that protection -# code needn't import anything -import os -import platform -import sys -import struct - -# Because ctypes is new from Python 2.5, so pytransform doesn't work -# before Python 2.5 -# -from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \ - pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE -from fnmatch import fnmatch - -# -# Support Platforms -# -plat_path = 'platforms' - -plat_table = ( - ('windows', ('windows', 'cygwin-*')), - ('darwin', ('darwin', 'ios')), - ('linux', ('linux*',)), - ('freebsd', ('freebsd*', 'openbsd*')), - ('poky', ('poky',)), -) - -arch_table = ( - ('x86', ('i?86', )), - ('x86_64', ('x64', 'x86_64', 'amd64', 'intel')), - ('arm', ('armv5',)), - ('armv6', ('armv6l',)), - ('armv7', ('armv7l',)), - ('ppc64', ('ppc64le',)), - ('mips32', ('mips',)), - ('aarch32', ('aarch32',)), - ('aarch64', ('aarch64', 'arm64')) -) - -# -# Hardware type -# -HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5) - -# -# Global -# -_pytransform = None - - -class PytransformError(Exception): - pass - - -def dllmethod(func): - def wrap(*args, **kwargs): - return func(*args, **kwargs) - return wrap - - -@dllmethod -def version_info(): - prototype = PYFUNCTYPE(py_object) - dlfunc = prototype(('version_info', _pytransform)) - return dlfunc() - - -@dllmethod -def init_pytransform(): - major, minor = sys.version_info[0:2] - # Python2.5 no sys.maxsize but sys.maxint - # bitness = 64 if sys.maxsize > 2**32 else 32 - prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p) - init_module = prototype(('init_module', _pytransform)) - ret = init_module(major, minor, pythonapi._handle) - if (ret & 0xF000) == 0x1000: - raise PytransformError('Initialize python wrapper failed (%d)' - % (ret & 0xFFF)) - return ret - - -@dllmethod -def init_runtime(): - prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int) - _init_runtime = prototype(('init_runtime', _pytransform)) - return _init_runtime(0, 0, 0, 0) - - -@dllmethod -def encrypt_code_object(pubkey, co, flags, suffix=''): - _pytransform.set_option(6, suffix.encode()) - prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int) - dlfunc = prototype(('encrypt_code_object', _pytransform)) - return dlfunc(pubkey, co, flags) - - -@dllmethod -def generate_license_file(filename, priname, rcode, start=-1, count=1): - prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int) - dlfunc = prototype(('generate_project_license_files', _pytransform)) - return dlfunc(filename.encode(), priname.encode(), rcode.encode(), - start, count) if sys.version_info[0] == 3 \ - else dlfunc(filename, priname, rcode, start, count) - - -@dllmethod -def generate_license_key(prikey, keysize, rcode): - prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p) - 
dlfunc = prototype(('generate_license_key', _pytransform)) - return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \ - else dlfunc(prikey, keysize, rcode.encode()) - - -@dllmethod -def get_registration_code(): - prototype = PYFUNCTYPE(py_object) - dlfunc = prototype(('get_registration_code', _pytransform)) - return dlfunc() - - -@dllmethod -def get_expired_days(): - prototype = PYFUNCTYPE(py_object) - dlfunc = prototype(('get_expired_days', _pytransform)) - return dlfunc() - - -@dllmethod -def clean_obj(obj, kind): - prototype = PYFUNCTYPE(c_int, py_object, c_int) - dlfunc = prototype(('clean_obj', _pytransform)) - return dlfunc(obj, kind) - - -def clean_str(*args): - tdict = { - 'str': 0, - 'bytearray': 1, - 'unicode': 2 - } - for obj in args: - k = tdict.get(type(obj).__name__) - if k is None: - raise RuntimeError('Can not clean object: %s' % obj) - clean_obj(obj, k) - - -def get_hd_info(hdtype, name=None): - if hdtype not in range(HT_DOMAIN + 1): - raise RuntimeError('Invalid parameter hdtype: %s' % hdtype) - size = 256 - t_buf = c_char * size - buf = t_buf() - cname = c_char_p(0 if name is None - else name.encode('utf-8') if hasattr('name', 'encode') - else name) - if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1): - raise PytransformError('Get hardware information failed') - return buf.value.decode() - - -def show_hd_info(): - return _pytransform.show_hd_info() - - -def assert_armored(*names): - prototype = PYFUNCTYPE(py_object, py_object) - dlfunc = prototype(('assert_armored', _pytransform)) - - def wrapper(func): - def wrap_execute(*args, **kwargs): - dlfunc(names) - return func(*args, **kwargs) - return wrap_execute - return wrapper - - -def get_license_info(): - info = { - 'ISSUER': None, - 'EXPIRED': None, - 'HARDDISK': None, - 'IFMAC': None, - 'IFIPV4': None, - 'DOMAIN': None, - 'DATA': None, - 'CODE': None, - } - rcode = get_registration_code().decode() - if rcode.startswith('*VERSION:'): - index = rcode.find('\n') - info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '') - rcode = rcode[index+1:] - - index = 0 - if rcode.startswith('*TIME:'): - from time import ctime - index = rcode.find('\n') - info['EXPIRED'] = ctime(float(rcode[6:index])) - index += 1 - - if rcode[index:].startswith('*FLAGS:'): - index += len('*FLAGS:') + 1 - info['FLAGS'] = ord(rcode[index - 1]) - - prev = None - start = index - for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']: - index = rcode.find('*%s:' % k) - if index > -1: - if prev is not None: - info[prev] = rcode[start:index] - prev = k - start = index + len(k) + 2 - info['CODE'] = rcode[start:] - i = info['CODE'].find(';') - if i > 0: - info['DATA'] = info['CODE'][i+1:] - info['CODE'] = info['CODE'][:i] - return info - - -def get_license_code(): - return get_license_info()['CODE'] - - -def get_user_data(): - return get_license_info()['DATA'] - - -def _match_features(patterns, s): - for pat in patterns: - if fnmatch(s, pat): - return True - - -def _gnu_get_libc_version(): - try: - prototype = CFUNCTYPE(c_char_p) - ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))() - return ver.decode().split('.') - except Exception: - pass - - -def format_platform(platid=None): - if platid: - return os.path.normpath(platid) - - plat = platform.system().lower() - mach = platform.machine().lower() - - for alias, platlist in plat_table: - if _match_features(platlist, plat): - plat = alias - break - - if plat == 'linux': - cname, cver = platform.libc_ver() - if cname == 'musl': - plat = 'musl' - 
elif cname == 'libc': - plat = 'android' - elif cname == 'glibc': - v = _gnu_get_libc_version() - if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214: - plat = 'centos6' - - for alias, archlist in arch_table: - if _match_features(archlist, mach): - mach = alias - break - - if plat == 'windows' and mach == 'x86_64': - bitness = struct.calcsize('P'.encode()) * 8 - if bitness == 32: - mach = 'x86' - - return os.path.join(plat, mach) - - -# Load _pytransform library -def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0): - path = os.path.dirname(__file__) if path is None \ - else os.path.normpath(path) - - plat = platform.system().lower() - name = '_pytransform' + suffix - if plat == 'linux': - filename = os.path.abspath(os.path.join(path, name + '.so')) - elif plat == 'darwin': - filename = os.path.join(path, name + '.dylib') - elif plat == 'windows': - filename = os.path.join(path, name + '.dll') - elif plat == 'freebsd': - filename = os.path.join(path, name + '.so') - else: - raise PytransformError('Platform %s not supported' % plat) - - if platid is not None and os.path.isfile(platid): - filename = platid - elif platid is not None or not os.path.exists(filename) or not is_runtime: - libpath = platid if platid is not None and os.path.isabs(platid) else \ - os.path.join(path, plat_path, format_platform(platid)) - filename = os.path.join(libpath, os.path.basename(filename)) - - if not os.path.exists(filename): - raise PytransformError('Could not find "%s"' % filename) - - try: - m = cdll.LoadLibrary(filename) - except Exception as e: - if sys.flags.debug: - print('Load %s failed:\n%s' % (filename, e)) - raise - - # Removed from v4.6.1 - # if plat == 'linux': - # m.set_option(-1, find_library('c').encode()) - - if not os.path.abspath('.') == os.path.abspath(path): - m.set_option(1, path.encode() if sys.version_info[0] == 3 else path) - - # Required from Python3.6 - m.set_option(2, sys.byteorder.encode()) - - if sys.flags.debug: - m.set_option(3, c_char_p(1)) - m.set_option(4, c_char_p(not is_runtime)) - - # Disable advanced mode by default - m.set_option(5, c_char_p(not advanced)) - - # Set suffix for private package - if suffix: - m.set_option(6, suffix.encode()) - - return m - - -def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0): - global _pytransform - _pytransform = _load_library(path, is_runtime, platid, suffix, advanced) - return init_pytransform() - - -def pyarmor_runtime(path=None, suffix='', advanced=0): - if _pytransform is not None: - return - - try: - pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced) - init_runtime() - except Exception as e: - if sys.flags.debug or hasattr(sys, '_catch_pyarmor'): - raise - sys.stderr.write("%s\n" % str(e)) - sys.exit(1) - - -# ---------------------------------------------------------- -# End of pytransform -# ---------------------------------------------------------- - -# -# Not available from v5.6 -# - - -def generate_capsule(licfile): - prikey, pubkey, prolic = _generate_project_capsule() - capkey, newkey = _generate_pytransform_key(licfile, pubkey) - return prikey, pubkey, capkey, newkey, prolic - - -@dllmethod -def _generate_project_capsule(): - prototype = PYFUNCTYPE(py_object) - dlfunc = prototype(('generate_project_capsule', _pytransform)) - return dlfunc() - - -@dllmethod -def _generate_pytransform_key(licfile, pubkey): - prototype = PYFUNCTYPE(py_object, c_char_p, py_object) - dlfunc = prototype(('generate_pytransform_key', _pytransform)) - return 
dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile, - pubkey) - - -# -# Deprecated functions from v5.1 -# -@dllmethod -def encrypt_project_files(proname, filelist, mode=0): - prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int) - dlfunc = prototype(('encrypt_project_files', _pytransform)) - return dlfunc(proname.encode(), filelist, mode) - - -def generate_project_capsule(licfile): - prikey, pubkey, prolic = _generate_project_capsule() - capkey = _encode_capsule_key_file(licfile) - return prikey, pubkey, capkey, prolic - - -@dllmethod -def _encode_capsule_key_file(licfile): - prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p) - dlfunc = prototype(('encode_capsule_key_file', _pytransform)) - return dlfunc(licfile.encode(), None) - - -@dllmethod -def encrypt_files(key, filelist, mode=0): - t_key = c_char * 32 - prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int) - dlfunc = prototype(('encrypt_files', _pytransform)) - return dlfunc(t_key(*key), filelist, mode) - - -@dllmethod -def generate_module_key(pubname, key): - t_key = c_char * 32 - prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p) - dlfunc = prototype(('generate_module_key', _pytransform)) - return dlfunc(pubname.encode(), t_key(*key), None) - -# -# Compatible for PyArmor v3.0 -# -@dllmethod -def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1): - '''Only for old version, before PyArmor 3''' - pyarmor_init(is_runtime=1) - prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int) - _init_runtime = prototype(('init_runtime', _pytransform)) - return _init_runtime(systrace, sysprofile, threadtrace, threadprofile) - - -@dllmethod -def import_module(modname, filename): - '''Only for old version, before PyArmor 3''' - prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p) - _import_module = prototype(('import_module', _pytransform)) - return _import_module(modname.encode(), filename.encode()) - - -@dllmethod -def exec_file(filename): - '''Only for old version, before PyArmor 3''' - prototype = PYFUNCTYPE(c_int, c_char_p) - _exec_file = prototype(('exec_file', _pytransform)) - return _exec_file(filename.encode()) diff --git a/pytransform/__pycache__/__init__.cpython-36.pyc b/pytransform/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 75bb2539be65062814dda6c5080880056d0f8c70..0000000000000000000000000000000000000000 Binary files a/pytransform/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/pytransform/__pycache__/__init__.cpython-38.pyc b/pytransform/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index 9aaff7fb561fefe38907458054c4c1944bed1091..0000000000000000000000000000000000000000 Binary files a/pytransform/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/pytransform/_pytransform.dll b/pytransform/_pytransform.dll deleted file mode 100644 index b1af3263115ebab5c213656cf1daf87b3510116b..0000000000000000000000000000000000000000 Binary files a/pytransform/_pytransform.dll and /dev/null differ diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..f19f26e829a55ba836dac15d9fb69f70a790d019 --- /dev/null +++ b/setup.py @@ -0,0 +1,49 @@ +# Use this guide: +# https://packaging.python.org/tutorials/packaging-projects/ + +# from unitgrade2.version import __version__ +import setuptools +with open("src/unitgrade_private2/version.py", "r", encoding="utf-8") as fh: + __version__ = fh.read().strip().split(" = ")[1].strip()[1:-1] +# long_description = fh.read() + +with 
open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + +setuptools.setup( + name="unitgrade-devel", + version=__version__, + author="Tue Herlau", + author_email="tuhe@dtu.dk", + description="A set of tools to develop unitgrade reports and evaluate them", + long_description=long_description, + long_description_content_type="text/markdown", + license="MIT", + url='https://lab.compute.dtu.dk/tuhe/unitgrade_private', + project_urls={ + "Bug Tracker": "https://lab.compute.dtu.dk/tuhe/unitgrade_private/issues", + }, + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + package_dir={"": "src"}, + packages=setuptools.find_packages(where="src"), + python_requires=">=3.8", + install_requires=['numpy', "unitgrade", "codesnipper", 'tabulate', 'tqdm', "pyfiglet", "colorama", "coverage", "compress_pickle"], +) + +# setup( +# name='unitgrade', +# version=__version__, +# packages=['unitgrade2'], +# url=, +# license='MIT', +# author='Tue Herlau', +# author_email='tuhe@dtu.dk', +# description=""" +# A student homework/exam evaluation framework build on pythons unittest framework. This package contains all files required to run unitgrade tests as a student. To develop tests, please use unitgrade_private. +# """, +# include_package_data=False, +# ) diff --git a/src/unitgrade_devel.egg-info/PKG-INFO b/src/unitgrade_devel.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..ab8dded1cac0cd7df8cf11240a7283c3fafd96b7 --- /dev/null +++ b/src/unitgrade_devel.egg-info/PKG-INFO @@ -0,0 +1,317 @@ +Metadata-Version: 2.1 +Name: unitgrade-devel +Version: 0.0.1 +Summary: A set of tools to develop unitgrade reports and evaluate them +Home-page: https://lab.compute.dtu.dk/tuhe/unitgrade_private +Author: Tue Herlau +Author-email: tuhe@dtu.dk +License: MIT +Project-URL: Bug Tracker, https://lab.compute.dtu.dk/tuhe/unitgrade_private/issues +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE + +# Unitgrade-private +Unitgrade is an automatic report and exam evaluation framework that enables instructors to offer automatically evaluated programming assignments. + Unitgrade is build on pythons `unittest` framework so that the tests can be specified in a familiar syntax and will integrate with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation) and an easy and 100% safe mechanism for verifying the students results and creating additional, hidden tests. A powerful cache system allows instructors to automatically create test-answers based on a working solution. + + - 100% Python `unittest` compatible + - No external configuration files: Just write a `unittest` + - No unnatural limitations: Use any package or framework. If you can `unittest` it, it works. + - Granular security model: + - Students get public `unittests` for easy development of solutions + - Students get a tamper-resistant file to create submissions which are uploaded + - Instructors can automatically verify the students solution using a Docker VM and run hidden tests + - Tests are quick to run and will integrate with your IDE + +**Note: This is the development version of unitgrade. 
If you are a student, please see http://gitlab.compute.dtu.dk/tuhe/unitgrade.** + +# Using unitgrade +The examples can be found in the `/examples/` directory: https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/examples + +## A simple example +Unitgrade makes the following assumptions: + - Your code is in python + - Whatever you want to do can be specified as a `unittest` + +Although not required, it is recommended you maintain two versions of the code: + - A fully-working version (i.e. all tests pass) + - A public version distributed to students (some code removed) + +In this example, I will use `snipper` (see http://gitlab.compute.dtu.dk/tuhe/snipper) to synchronize the two versions automatically. +Let's look at an example. You need three files +``` +instructor/cs101/homework.py # This contains the students homework +instructor/cs101/report1.py # This contains the tests +instructor/cs101/deploy.py # A private file to deploy the tests +``` + +### The homework +The homework is just any old python code you would give to the students. For instance: +```python +def reverse_list(mylist): #!f + """ + Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g. + reverse_list([1,2,3]) should return [3,2,1] (as a list). + """ + return list(reversed(mylist)) + +def add(a,b): #!f + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ + return a+b + +if __name__ == "__main__": + # Problem 1: Write a function which add two numbers + print(f"Your result of 2 + 2 = {add(2,2)}") + print(f"Reversing a small list", reverse_list([2,3,5,7])) +``` +### The test: +The test consists of individual problems and a report-class. The tests themselves are just regular Unittest (we will see a slightly smarter idea in a moment). For instance: + +```python +from looping import reverse_list, add +import unittest + + +class Week1(unittest.TestCase): + def test_add(self): + self.assertEqual(add(2, 2), 4) + self.assertEqual(add(-100, 5), -95) + + def test_reverse(self): + self.assertEqual(reverse_list([1, 2, 3]), [3, 2, 1]) + +``` +A number of tests can be collected into a `Report`, which will allow us to assign points to the tests and use the more advanced features of the framework later. A complete, minimal example: + +```python +from src.unitgrade2.unitgrade2 import Report +from src.unitgrade2 import evaluate_report_student +from looping import reverse_list, add +import unittest + + +class Week1(unittest.TestCase): + def test_add(self): + self.assertEqual(add(2, 2), 4) + self.assertEqual(add(-100, 5), -95) + + def test_reverse(self): + self.assertEqual(reverse_list([1, 2, 3]), [3, 2, 1]) + + +import cs101 + + +class Report1(Report): + title = "CS 101 Report 1" + questions = [(Week1, 10)] # Include a single question for 10 credits. + pack_imports = [cs101] + + +if __name__ == "__main__": + # Uncomment to simply run everything as a unittest: + # unittest.main(verbosity=2) + evaluate_report_student(Report1()) +``` + +### Deployment +The above is all you need if you simply want to use the framework as a self-check: Students can run the code and see how well they did. +In order to begin using the framework for evaluation we need to create a bit more structure.
We do that by deploying the report class as follows: +```python +from report1 import Report1 +from unitgrade_private2.hidden_create_files import setup_grade_file_report +from snipper import snip_dir +import shutil + +if __name__ == "__main__": + setup_grade_file_report(Report1, minify=False, obfuscate=False, execute=False) + + # Deploy the files using snipper: https://gitlab.compute.dtu.dk/tuhe/snipper + snip_dir.snip_dir(source_dir="../programs", dest_dir="../../students/programs", clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py']) + +``` + - The first line creates the `report1_grade.py` script and any additional data files needed by the tests (none in this case) + - The second line sets up the students' directory (remember, we have included the solutions!) and removes the students' solutions. You can check the results in the students folder. + +### Using the framework as a student +You can now upload the `students` directory to the students. The students can run their tests either by running `cs101.report1` in their IDE or by typing: +``` +python -m cs101.report1 +``` +in the command line. This produces a detailed output of the test and the program is 100% compatible with a debugger. When the students are happy with their output they can run (using command line or IDE): +``` +python -m cs101.report1_grade +``` +This runs an identical set of tests, but produces a `.token` file the students can upload to get credit. + - The reason to have a separate `report1_grade.py` script is to avoid accidental removal of tests. + - The `report1_grade.py` includes all tests and the main parts of the framework and is obfuscated by default. You can apply a much stronger level of protection by using e.g. `pyarmor`. + - The `report1_token.token` file includes the outcome of the tests, the time taken, and all python source code in the package. In other words, the file can be used for manual grading, for plagiarism detection and for detecting tampering. + - You can easily use the framework to include output of functions. + - See below for how to validate the students' results + +### How safe is this? +Cheating within the framework is probably best done by manually editing the `.token`-file or by creating a broken set of tests. This involves a risk of being trivially detected, for instance because tests have the wrong runtime, but more importantly +the framework automatically packs all the used source code, so if a student is cheating there is no way to hide it from an instructor who looks at the results. If the +program is used in conjunction with automatic plagiarism software, cheating therefore involves both breaking the framework and creating 'false' solutions which statistically match other students' solutions, and then hoping nobody bothers to check the output. + The bottom line is that I think plain old plagiarism is a much more significant risk, and one the framework reduces relative to other project work +by demanding that the source code is included. + +If this is not enough you have two options: You can either use `pyarmor` to create a **very** difficult challenge for a prospective hacker, or you can simply validate the students' results as shown below. + + +## Example 2: The framework +One of the main advantages of `unitgrade` over web-based autograders is that tests are really easy to develop and maintain. To take advantage of this, we simply change the class the questions inherit from to `UTestCase` (this is still a `unittest.TestCase`) and we can make use of the cache system.
As an example: + +```python +class Week1(UTestCase): + """ The first question for week 1. """ + def test_add(self): + from cs102.homework1 import add + self.assertEqualC(add(2,2)) + self.assertEqualC(add(-100, 5)) + + def test_reverse(self): + from cs102.homework1 import reverse_list + """ Reverse a list """ # Add a title to the test. + self.assertEqualC(reverse_list([1,2,3])) +``` +Note we have changed the test function to `self.assertEqualC` (the `C` is for cache) and dropped the expected result. What `unitgrade` will do +is to evaluate the test *on the working version of the code*, compute the results of the test, and allow them to be available to the user. All this happens in the `deploy.py` script from before. + +There are other ways to send the output to the user. For instance: +```python +class Question2(UTestCase): + """ Second problem """ + @cache + def my_reversal(self, ls): + # The '@cache' decorator ensures the function is not run on the *students* computer + # Instead the code is run on the teachers computer and the result is passed on with the + # other pre-computed results -- i.e. this function will run regardless of how the student happens to have + # implemented reverse_list. + from cs102.homework1 import reverse_list + return reverse_list(ls) + + def test_reverse_tricky(self): + ls = ("butterfly", 4, 1) + ls2 = self.my_reversal( tuple(ls) ) # This will always produce the right result. + ls3 = self.my_reversal( tuple([1,2,3]) ) # Also works; the cache respects input arguments. + self.assertEqualC(self.my_reversal( tuple(ls2) )) # This will actually test the students code. + return ls +``` +This code showcases the `@cache` decorator. What it does is compute the output of the function on your computer and allow that +result to be available to students (the input arguments must be immutable). This may seem odd, but it is very helpful + - if you have exercises that depend on each other, and you want students to have access to the expected result of older methods which they may not have implemented correctly. + - If you want to use functions the students write to set up appropriate tests without giving away the solution + +Furthermore, one of the tests now has a return value, which will be automatically included in the `.token` file. + +## Example 3: Hidden and secure tests +To use `unitgrade` as a true autograder you want both security that nobody tampered with your tests (or the `.token` files), and +also assurance that the students' implementations didn't just detect what input was being used and +return the correct answer. To do that you need hidden tests and external validation. + +Our new test class looks like this: + +```python +from src.unitgrade2.unitgrade2 import UTestCase, Report, hide +from src.unitgrade2 import evaluate_report_student + + +class Week1(UTestCase): + """ The first question for week 1. """ + + def test_add(self): + from cs103.homework1 import add + self.assertEqualC(add(2, 2)) + self.assertEqualC(add(-100, 5)) + + @hide + def test_add_hidden(self): + # This is a hidden test. The @hide-decorator will allow unitgrade to remove the test. + # See the output in the student directory for more information. + from cs103.homework1 import add + self.assertEqualC(add(2, 2)) + + +import cs103 + + +class Report3(Report): + title = "CS 101 Report 3" + questions = [(Week1, 20)] # Include a single question for 10 credits. + pack_imports = [cs103] + + +if __name__ == "__main__": + evaluate_report_student(Report3()) +``` + +This test is stored as `report3_complete.py`.
Note the `@hide` decorator which will tell the framework that the test (and all code) should be hidden from the user. + +In order to use the hidden tests, we first need a version for the students without them. This can be done by changing the `deploy.py` script as follows: + +```python +def deploy_student_files(): + setup_grade_file_report(Report3, minify=False, obfuscate=False, execute=False) + Report3.reset() + + fout, ReportWithoutHidden = remove_hidden_methods(Report3, outfile="report3.py") + setup_grade_file_report(ReportWithoutHidden, minify=False, obfuscate=False, execute=False) + sdir = "../../students/cs103" + snip_dir(source_dir="../cs103", dest_dir=sdir, clean_destination_dir=True, exclude=['__pycache__', '*.token', 'deploy.py', 'report3_complete*.py']) + return sdir + + +if __name__ == "__main__": + # Step 1: Deploy the students files and return the directory they were written to + student_directory = deploy_student_files() +``` +This script first compiles the `report3_complete_grade.py`-script (which we will use) and then +removes the hidden methods and compiles the students' `report3_grade.py`-script. Finally, we synchronize with the +students folder, which now contains no traces of our hidden method -- not in any of the source files or the data files. + +The next step is optional, but we quickly simulate that the student runs their script and we get a link to the `.token` file: +```python +os.system("cd ../../students && python -m cs103.report3_grade") +student_token_file = glob.glob(student_directory + "/*.token")[0] +``` +This is the file we assume the student uploads. The external validation can be carried out as follows: + +```python +def run_student_code_on_docker(Dockerfile, student_token_file): + token = docker_run_token_file(Dockerfile_location=Dockerfile, + host_tmp_dir=os.path.dirname(Dockerfile) + "/tmp", + student_token_file=student_token_file, + instructor_grade_script="report3_complete_grade.py") + with open(token, 'rb') as f: + results = pickle.load(f) + return results + +if __name__ == "__main__": + # Step 3: Compile the Docker image (obviously you will only do this once; add your packages to requirements.txt). + Dockerfile = os.path.dirname(__file__) + "/../unitgrade-docker/Dockerfile" + os.system("cd ../unitgrade-docker && docker build --tag unitgrade-docker .") + + # Step 4: Test the students .token file and get the results-token-file. Compare the contents with the students_token_file: + checked_token = run_student_code_on_docker(Dockerfile, student_token_file) + + # Let's quickly compare the students score to what we got (the dictionary contains all relevant information including code). + with open(student_token_file, 'rb') as f: + results = pickle.load(f) + print("Student's score was:", results['total']) + print("My independent evaluation of the students score was", checked_token['total']) +``` + +These steps compile a Docker image (you can easily add whatever packages you need) and run **our** `report3_complete_grade.py` script on the **student's** source code (as taken from the token file). + +The last lines load the result and compare the score -- in this case both will return 0 points, and any dissimilarity in the results should be immediate cause for concern. + + - Docker prevents students from doing malicious things to your computer and allows the results to be reproducible by TAs.
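To make the final comparison explicit, the check can be wrapped in a small helper that flags mismatches automatically. This is a minimal sketch, assuming the `.token` files follow the pickle format and `'total'` entry shown above; the helper name `compare_token_scores` is illustrative and not part of the framework:

```python
import pickle

def compare_token_scores(student_token_file, checked_token):
    # Load the score reported in the student's own .token file and compare it
    # with the score obtained from the independent Docker evaluation.
    with open(student_token_file, 'rb') as f:
        student_results = pickle.load(f)
    if student_results['total'] != checked_token['total']:
        print("Mismatch: student reported", student_results['total'],
              "but the independent run gave", checked_token['total'])
    return student_results['total'], checked_token['total']
```

Any mismatch between the two scores can then simply be flagged for manual review.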
+ + diff --git a/src/unitgrade_devel.egg-info/SOURCES.txt b/src/unitgrade_devel.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..b7b0b237dd0c27926a531a549111442e0139933a --- /dev/null +++ b/src/unitgrade_devel.egg-info/SOURCES.txt @@ -0,0 +1,18 @@ +LICENSE +README.md +pyproject.toml +setup.py +src/unitgrade_devel.egg-info/PKG-INFO +src/unitgrade_devel.egg-info/SOURCES.txt +src/unitgrade_devel.egg-info/dependency_links.txt +src/unitgrade_devel.egg-info/requires.txt +src/unitgrade_devel.egg-info/top_level.txt +src/unitgrade_private2/__init__.py +src/unitgrade_private2/deployment.py +src/unitgrade_private2/docker_helpers.py +src/unitgrade_private2/hidden_create_files.py +src/unitgrade_private2/hidden_gather_upload.py +src/unitgrade_private2/token_loader.py +src/unitgrade_private2/version.py +src/unitgrade_private2/autolab/__init__.py +src/unitgrade_private2/autolab/autolab.py \ No newline at end of file diff --git a/src/unitgrade_devel.egg-info/dependency_links.txt b/src/unitgrade_devel.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/src/unitgrade_devel.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/unitgrade_devel.egg-info/requires.txt b/src/unitgrade_devel.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..70825fe72886896537a011949bd91073ba2367ca --- /dev/null +++ b/src/unitgrade_devel.egg-info/requires.txt @@ -0,0 +1,9 @@ +numpy +unitgrade +codesnipper +tabulate +tqdm +pyfiglet +colorama +coverage +compress_pickle diff --git a/src/unitgrade_devel.egg-info/top_level.txt b/src/unitgrade_devel.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..9da46718d71b396e5ea4071196fec5b4b71d04f4 --- /dev/null +++ b/src/unitgrade_devel.egg-info/top_level.txt @@ -0,0 +1 @@ +unitgrade_private2 diff --git a/unitgrade_private2/__init__.py b/src/unitgrade_private2/__init__.py similarity index 97% rename from unitgrade_private2/__init__.py rename to src/unitgrade_private2/__init__.py index 3164fd600024d6e68669c78454d997897d638063..b233adc048e497dbefbe83c86bb489cbc122bf06 100644 --- a/unitgrade_private2/__init__.py +++ b/src/unitgrade_private2/__init__.py @@ -1,5 +1,6 @@ import os import compress_pickle +# __version__ = "0.0.1" def cache_write(object, file_name, verbose=True): dn = os.path.dirname(file_name) diff --git a/unitgrade_private2/codejudge_example/__init__.py b/src/unitgrade_private2/autolab/__init__.py similarity index 100% rename from unitgrade_private2/codejudge_example/__init__.py rename to src/unitgrade_private2/autolab/__init__.py diff --git a/autolab/autolab.py b/src/unitgrade_private2/autolab/autolab.py similarity index 89% rename from autolab/autolab.py rename to src/unitgrade_private2/autolab/autolab.py index 6f9fc05652e3aba3599a51ac30b120ede1707d19..1c863c577326a3f50e0ef38ccc5c6f2d61ea8592 100644 --- a/autolab/autolab.py +++ b/src/unitgrade_private2/autolab/autolab.py @@ -4,17 +4,14 @@ cd ~/Autolab && bundle exec rails s -p 8000 --binding=0.0.0.0 To remove my shitty image: docker rmi tango_python_tue """ -import inspect from zipfile import ZipFile -import os from os.path import basename import os import shutil from jinja2 import Environment, FileSystemLoader import glob import pickle -from unitgrade2.unitgrade2 import Report -import inspect +from src.unitgrade2.unitgrade2 import Report from unitgrade_private2 import docker_helpers COURSES_BASE = 
"/home/tuhe/Autolab/courses/AutoPopulated" @@ -58,10 +55,10 @@ def zipFilesInDir(dirName, zipFileName, filter): def paths2report(base_path, report_file): mod = ".".join(os.path.relpath(report_file[:-3], base_path).split(os.sep)) - # f2 = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101" - # spec1 = importlib.util.spec_from_file_location("cs101", f2) - # cs101 = importlib.util.module_from_spec(spec1) - # spec1.loader.exec_module(cs101) + # f2 = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/programs" + # spec1 = importlib.util.spec_from_file_location("programs", f2) + # programs = importlib.util.module_from_spec(spec1) + # spec1.loader.exec_module(programs) from importlib.machinery import SourceFileLoader foo = SourceFileLoader(mod, report_file).load_module() @@ -85,22 +82,6 @@ def run_relative(file, base): import inspect -# class Example: -# @property -# def title(self): -# stack = inspect.stack() -# return stack[1].function.__doc__ -# @title.setter -# def title(self, value): -# stack = inspect.stack() -# stack[1].function.__doc__ = value -# # self._title = value -# -# def myfun(self): -# self.title = 234 -# self.title -# -# return 3 def deploy_assignment(base_name, INSTRUCTOR_BASE, INSTRUCTOR_GRADE_FILE, STUDENT_BASE, STUDENT_GRADE_FILE, @@ -121,8 +102,8 @@ def deploy_assignment(base_name, INSTRUCTOR_BASE, INSTRUCTOR_GRADE_FILE, STUDENT LAB_DEST = os.path.join(COURSES_BASE, base_name) - # STUDENT_HANDOUT_DIR = os.path.dirname(STUDENT_GRADE_FILE) #"/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101" - # INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1.py" + # STUDENT_HANDOUT_DIR = os.path.dirname(STUDENT_GRADE_FILE) #"/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/programs" + # INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/programs/report5.py" # Make instructor token file. # Get the instructor result file. 
run_relative(INSTRUCTOR_GRADE_FILE, INSTRUCTOR_BASE) @@ -142,7 +123,7 @@ def deploy_assignment(base_name, INSTRUCTOR_BASE, INSTRUCTOR_GRADE_FILE, STUDENT print(scores) # Quickly make student .token file to upload: - # os.system(f"cd {os.path.dirname(STUDENT_HANDOUT_DIR)} && python -m cs101.{os.path.basename(INSTRUCTOR_GRADE_FILE)[:-3]}") + # os.system(f"cd {os.path.dirname(STUDENT_HANDOUT_DIR)} && python -m programs.{os.path.basename(INSTRUCTOR_GRADE_FILE)[:-3]}") # os.system(f"cd {STUDENT_HANDOUT_DIR} && python {os.path.basename(INSTRUCTOR_GRADE_FILE)}") # handin_filename = os.path.basename(STUDENT_TOKEN_FILE) @@ -165,7 +146,7 @@ def deploy_assignment(base_name, INSTRUCTOR_BASE, INSTRUCTOR_GRADE_FILE, STUDENT INSTRUCTOR_REPORT_FILE = INSTRUCTOR_GRADE_FILE[:-9] + ".py" a = 234 - # /home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1.py" + # /home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/programs/report5.py" data = { 'base_name': base_name, # 'nice_name': base_name + "please", @@ -213,10 +194,10 @@ if __name__ == "__main__": print("Deploying to", COURSES_BASE) docker_build_image() - INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/cs101/report1_grade.py" + INSTRUCTOR_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor/programs/report1_grade.py" INSTRUCTOR_BASE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/instructor" STUDENT_BASE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students" - STUDENT_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/cs101/report1_grade.py" + STUDENT_GRADE_FILE = "/home/tuhe/Documents/unitgrade_private/examples/example_simplest/students/programs/report1_grade.py" output_tar = deploy_assignment("hello4", INSTRUCTOR_BASE, INSTRUCTOR_GRADE_FILE, STUDENT_BASE, STUDENT_GRADE_FILE=STUDENT_GRADE_FILE) diff --git a/autolab/lab_template/Makefile b/src/unitgrade_private2/autolab/lab_template/Makefile similarity index 100% rename from autolab/lab_template/Makefile rename to src/unitgrade_private2/autolab/lab_template/Makefile diff --git a/autolab/lab_template/autograde-Makefile b/src/unitgrade_private2/autolab/lab_template/autograde-Makefile similarity index 100% rename from autolab/lab_template/autograde-Makefile rename to src/unitgrade_private2/autolab/lab_template/autograde-Makefile diff --git a/autolab/lab_template/autograde.tar b/src/unitgrade_private2/autolab/lab_template/autograde.tar similarity index 100% rename from autolab/lab_template/autograde.tar rename to src/unitgrade_private2/autolab/lab_template/autograde.tar diff --git a/autolab/lab_template/hello.rb b/src/unitgrade_private2/autolab/lab_template/hello.rb similarity index 100% rename from autolab/lab_template/hello.rb rename to src/unitgrade_private2/autolab/lab_template/hello.rb diff --git a/autolab/lab_template/hello.yml b/src/unitgrade_private2/autolab/lab_template/hello.yml similarity index 100% rename from autolab/lab_template/hello.yml rename to src/unitgrade_private2/autolab/lab_template/hello.yml diff --git a/autolab/lab_template/src/Makefile b/src/unitgrade_private2/autolab/lab_template/src/Makefile similarity index 100% rename from autolab/lab_template/src/Makefile rename to src/unitgrade_private2/autolab/lab_template/src/Makefile diff --git a/autolab/lab_template/src/Makefile-handout b/src/unitgrade_private2/autolab/lab_template/src/Makefile-handout similarity 
index 100% rename from autolab/lab_template/src/Makefile-handout rename to src/unitgrade_private2/autolab/lab_template/src/Makefile-handout diff --git a/autolab/lab_template/src/README b/src/unitgrade_private2/autolab/lab_template/src/README similarity index 100% rename from autolab/lab_template/src/README rename to src/unitgrade_private2/autolab/lab_template/src/README diff --git a/autolab/lab_template/src/README-handout b/src/unitgrade_private2/autolab/lab_template/src/README-handout similarity index 100% rename from autolab/lab_template/src/README-handout rename to src/unitgrade_private2/autolab/lab_template/src/README-handout diff --git a/autolab/lab_template/src/driver.sh b/src/unitgrade_private2/autolab/lab_template/src/driver.sh old mode 100755 new mode 100644 similarity index 100% rename from autolab/lab_template/src/driver.sh rename to src/unitgrade_private2/autolab/lab_template/src/driver.sh diff --git a/autolab/lab_template/src/driver_python.py b/src/unitgrade_private2/autolab/lab_template/src/driver_python.py similarity index 96% rename from autolab/lab_template/src/driver_python.py rename to src/unitgrade_private2/autolab/lab_template/src/driver_python.py index 1046485f3edb4a05bd67d6d134b38e0edfa95f4a..b5ad50fe453fba51e186bbfcb49b005427371c6f 100644 --- a/autolab/lab_template/src/driver_python.py +++ b/src/unitgrade_private2/autolab/lab_template/src/driver_python.py @@ -55,8 +55,8 @@ def rcom(cm): start = time.time() rcom(command) # pfiles() -# for f in glob.glob(host_tmp_dir + "/cs101/*"): -# print("cs101/", f) +# for f in glob.glob(host_tmp_dir + "/programs/*"): +# print("programs/", f) # print("---") ls = glob.glob(token) # print(ls) diff --git a/autolab/lab_template/src/hello.c b/src/unitgrade_private2/autolab/lab_template/src/hello.c similarity index 100% rename from autolab/lab_template/src/hello.c rename to src/unitgrade_private2/autolab/lab_template/src/hello.c diff --git a/autolab/lab_template/src/hello.c-handout b/src/unitgrade_private2/autolab/lab_template/src/hello.c-handout similarity index 100% rename from autolab/lab_template/src/hello.c-handout rename to src/unitgrade_private2/autolab/lab_template/src/hello.c-handout diff --git a/unitgrade_private2/deployment.py b/src/unitgrade_private2/deployment.py similarity index 94% rename from unitgrade_private2/deployment.py rename to src/unitgrade_private2/deployment.py index e25bd89d6bc64f01cd2ba416618b63aa00c41957..1acee0845b17c323e2fcf3eb92a75afe28c64699 100644 --- a/unitgrade_private2/deployment.py +++ b/src/unitgrade_private2/deployment.py @@ -1,6 +1,5 @@ import inspect -from unitgrade2.unitgrade2 import methodsWithDecorator, hide -import os +from src.unitgrade2.unitgrade2 import methodsWithDecorator, hide import os import importlib diff --git a/unitgrade_private2/docker_helpers.py b/src/unitgrade_private2/docker_helpers.py similarity index 58% rename from unitgrade_private2/docker_helpers.py rename to src/unitgrade_private2/docker_helpers.py index cde3e6e03ba0c0104e0f61ec7aca43991d15c59b..81bbd120a3e754c60f06a5722eec53012927325e 100644 --- a/unitgrade_private2/docker_helpers.py +++ b/src/unitgrade_private2/docker_helpers.py @@ -1,15 +1,23 @@ # from cs202courseware.ug2report1 import Report1 -# import thtools + import pickle import os import glob -# from unitgrade_private2.deployment import remove_hidden_methods -# from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet -# from unitgrade_private2.hidden_create_files import setup_grade_file_report import shutil import time import zipfile import 
io +import inspect +import subprocess + +def compile_docker_image(Dockerfile, tag=None): + assert os.path.isfile(Dockerfile) + base = os.path.dirname(Dockerfile) + if tag == None: + tag = os.path.basename(base) + os.system(f"cd {base} && docker build --tag {tag} .") + return tag + def student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade_script, grade_file_relative_destination): """ @@ -20,7 +28,8 @@ def student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade :param instructor_grade_script: :return: """ - # assert os.path.exists(Dockerfile_location) + assert os.path.exists(student_token_file) + assert os.path.exists(instructor_grade_script) start = time.time() with open(student_token_file, 'rb') as f: @@ -31,15 +40,11 @@ def student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade with zipfile.ZipFile(zb) as zip: zip.extractall(host_tmp_dir) # Done extracting the zip file! Now time to move the (good) report test class into the location. - import inspect - # if ReportClass is not None: - # gscript = inspect.getfile(ReportClass)[:-3] + "_grade.py" - # else: + gscript = instructor_grade_script print(f"{sources['report_relative_location']=}") print(f"{sources['name']=}") - # student_grade_script = host_tmp_dir + "/" + sources['name'] + "/" + sources['report_relative_location'] - # instructor_grade_script = os.path.dirname(student_grade_script) + "/" + os.path.basename(gscript) + print("Now in docker_helpers.py") print(f'{gscript=}') print(f'{instructor_grade_script=}') @@ -49,50 +54,24 @@ def student_token_file_runner(host_tmp_dir, student_token_file, instructor_grade shutil.copy(gscript, gscript_destination) # Now everything appears very close to being set up and ready to roll!. - # import thtools - - # os.path.split() d = os.path.normpath(grade_file_relative_destination).split(os.sep) d = d[:-1] + [os.path.basename(instructor_grade_script)[:-3]] - # print(f'{d=}') pycom = ".".join(d) - """ docker run -v c:/Users/tuhe/Documents/2021/python-docker/tmp:/app python-docker python3 -m cs202courseware.ug2report1_grade """ - # dockname = os.path.basename(os.path.dirname(Dockerfile_location)) - - # tmp_grade_file = sources['name'] + "/" + sources['report_relative_location'] - # print(f'{tmp_grade_file=}') - # pycom = ".".join((sources['name'],) + os.path.split(sources['report_relative_location'])[1:-1] + (os.path.basename(gscript),)) pycom = "python3 -m " + pycom # pycom[:-3] print(f"{pycom=}") - # tmp_path = os.path.abspath(host_tmp_dir).replace("\\", "/") - # dcom = f"docker run -v {tmp_path}:/app {dockname} {pycom}" - # cdcom = f"cd {os.path.dirname(Dockerfile_location)}" - # fcom = f"{cdcom} && {dcom}" - # print("> Running docker command") - # print(fcom) - - # thtools.execute_command(fcom.split()) - # get token file: token_location = host_tmp_dir + "/" + os.path.dirname( grade_file_relative_destination ) + "/*.token" - - # host_tmp_dir + "/" + os.path.dirname(tmp_grade_file) + "/" - # tokens = glob.glob(host_tmp_dir + "/" + os.path.dirname(tmp_grade_file) + "/*.token") - # token_location = host_tmp_dir + "/" + os.path.dirname(tmp_grade_file) - - # for t in tokens: - # print("Source image produced token", t) elapsed = time.time() - start # print("Elapsed time is", elapsed) return pycom, token_location - pass -def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file, ReportClass=None, instructor_grade_script=None): + +def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file, 
instructor_grade_script=None): """ This thingy works: @@ -119,41 +98,33 @@ def docker_run_token_file(Dockerfile_location, host_tmp_dir, student_token_file, with zipfile.ZipFile(zb) as zip: zip.extractall(host_tmp_dir) # Done extracting the zip file! Now time to move the (good) report test class into the location. - import inspect - if ReportClass is not None: - gscript = inspect.getfile(ReportClass)[:-3] + "_grade.py" - else: - gscript = instructor_grade_script + gscript = instructor_grade_script - student_grade_script = host_tmp_dir + "/" + sources['name'] + "/" + sources['report_relative_location'] + student_grade_script = host_tmp_dir + "/" + sources['report_relative_location'] instructor_grade_script = os.path.dirname(student_grade_script) + "/"+os.path.basename(gscript) shutil.copy(gscript, instructor_grade_script) - # Now everything appears very close to being set up and ready to roll!. - import thtools - """ - docker run -v c:/Users/tuhe/Documents/2021/python-docker/tmp:/app python-docker python3 -m cs202courseware.ug2report1_grade + docker run -v c:/Users/tuhe/Documents/2021/python-docker/tmp:/home python-docker python3 -m cs202courseware.ug2report1_grade """ dockname = os.path.basename( os.path.dirname(Dockerfile_location) ) tmp_grade_file = sources['name'] + "/" + sources['report_relative_location'] - pycom = ".".join( (sources['name'], ) + os.path.split(sources['report_relative_location'])[1:-1] + (os.path.basename(gscript),) ) - pycom = "python3 -m " + pycom[:-3] + pycom = ".".join( sources['report_module_specification'][:-1] + [os.path.basename(gscript)[:-3],] ) + pycom = "python3 -m " + pycom tmp_path = os.path.abspath(host_tmp_dir).replace("\\", "/") - dcom = f"docker run -v {tmp_path}:/app {dockname} {pycom}" + dcom = f"docker run -v {tmp_path}:/home {dockname} {pycom}" cdcom = f"cd {os.path.dirname(Dockerfile_location)}" fcom = f"{cdcom} && {dcom}" print("> Running docker command") print(fcom) init = time.time() - start - thtools.execute_command(fcom.split()) - # get token file: - + # thtools.execute_command(fcom.split()) + subprocess.check_output(fcom.split(), shell=True).decode("utf-8") host_tmp_dir +"/" + os.path.dirname(tmp_grade_file) + "/" - tokens = glob.glob(host_tmp_dir +"/" + os.path.dirname(tmp_grade_file) + "/*.token" ) + tokens = glob.glob( os.path.dirname(instructor_grade_script) + "/*.token" ) for t in tokens: print("Source image produced token", t) elapsed = time.time() - start diff --git a/unitgrade_private2/example/report0.py b/src/unitgrade_private2/example/report0.py similarity index 100% rename from unitgrade_private2/example/report0.py rename to src/unitgrade_private2/example/report0.py diff --git a/unitgrade_private2/hidden_create_files.py b/src/unitgrade_private2/hidden_create_files.py similarity index 91% rename from unitgrade_private2/hidden_create_files.py rename to src/unitgrade_private2/hidden_create_files.py index b041c81318d449cd4482f14b6db47e097a8fb8cb..50796e6040bff490250167d39c9c87bc5ea274c2 100644 --- a/unitgrade_private2/hidden_create_files.py +++ b/src/unitgrade_private2/hidden_create_files.py @@ -1,4 +1,4 @@ -from unitgrade2 import cache_read, cache_write +from src.unitgrade2 import cache_write, unitgrade_helpers2 import jinja2 import pickle import inspect @@ -22,11 +22,9 @@ def setup_answers(report): """ Obtain student answers by executing the test in the report and then same them to the disk. 
""" - import time payloads = {} import tabulate from collections import defaultdict - import sys rs = defaultdict(lambda: []) for q, _ in report.questions: # for q, _ in report.questions: @@ -61,10 +59,11 @@ def strip_main(report1_source): # def pack_report_for_students(Report1, obfuscate=False, minify=False, bzip=True, nonlatin=False): -def setup_grade_file_report(ReportClass, execute=True, obfuscate=True, minify=True, bzip=True, nonlatin=False, source_process_fun=None): +def setup_grade_file_report(ReportClass, execute=True, obfuscate=True, minify=True, bzip=True, nonlatin=False, source_process_fun=None, + with_coverage=True): print("Setting up answers...") # ReportClass() - payload = ReportClass()._setup_answers() + payload = ReportClass()._setup_answers(with_coverage=with_coverage) # setup_answers(ReportClass()) import time time.sleep(0.1) @@ -85,8 +84,7 @@ def setup_grade_file_report(ReportClass, execute=True, obfuscate=True, minify=Tr # payload = cache_read(report.computed_answers_file) picklestring = pickle.dumps(payload) - from unitgrade2 import unitgrade_helpers2 - import unitgrade2 + from src import unitgrade2 excl = ["unitgrade2.unitgrade_helpers2", "from . import", "from unitgrade2.", @@ -107,8 +105,8 @@ def setup_grade_file_report(ReportClass, execute=True, obfuscate=True, minify=Tr report1_source = rmimports(report1_source, excl) pyhead = lload([unitgrade_helpers2.__file__, hidden_gather_upload.__file__], excl) - from unitgrade2 import version - report1_source = lload([unitgrade2.__file__, unitgrade2.unitgrade2.__file__, unitgrade_helpers2.__file__, hidden_gather_upload.__file__, version.__file__], excl) + "\n" + report1_source + from src.unitgrade2 import version + report1_source = lload([unitgrade2.__file__, src.unitgrade2.unitgrade2.__file__, unitgrade_helpers2.__file__, hidden_gather_upload.__file__, version.__file__], excl) + "\n" + report1_source print(sys.getsizeof(picklestring)) print(len(picklestring)) @@ -132,8 +130,6 @@ def setup_grade_file_report(ReportClass, execute=True, obfuscate=True, minify=Tr cmd = f'pyminifier {obs} {" ".join(extra)} --replacement-length=20 -o {output} {output}' print(cmd) os.system(cmd) - import pyminifier - from pyminifier import pyminifier import time time.sleep(0.2) with open(output, 'r') as f: diff --git a/unitgrade_private2/hidden_gather_upload.py b/src/unitgrade_private2/hidden_gather_upload.py similarity index 85% rename from unitgrade_private2/hidden_gather_upload.py rename to src/unitgrade_private2/hidden_gather_upload.py index 0fb11d89ed3559b389842ad21fb7cfb442a93093..db76a830528ea3aca8cbd0414360213006477128 100644 --- a/unitgrade_private2/hidden_gather_upload.py +++ b/src/unitgrade_private2/hidden_gather_upload.py @@ -1,13 +1,8 @@ -from unitgrade2.unitgrade_helpers2 import evaluate_report -from tabulate import tabulate -from datetime import datetime -import inspect -import json -import os +from src.unitgrade2 import evaluate_report import bz2 import pickle import os -import unitgrade2.unitgrade_helpers2 + def bzwrite(json_str, token): # to get around obfuscation issues with getattr(bz2, 'open')(token, "wt") as f: @@ -22,7 +17,8 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module' and False: + + if hasattr(m, '__file__') and not hasattr(m, '__path__'): # Importing a simple file: m.__class__.__name__ == 'module' and False: top_package = 
os.path.dirname(m.__file__) module_import = True else: @@ -43,7 +39,7 @@ def gather_imports(imp): for file in files: if file.endswith(".py"): fpath = os.path.join(root, file) - v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package)) + v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package) if not module_import else top_package) zip.write(fpath, v) resources['zipfile'] = zip_buffer.getvalue() @@ -87,14 +83,14 @@ def gather_upload_to_campusnet(report, output_dir=None): results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True, show_progress_bar=not args.noprogress, big_header=not args.autolab) - print(" ") - print("="*n) - print("Final evaluation") - print(tabulate(table_data)) + # print(" ") + # print("="*n) + # print("Final evaluation") + # print(tabulate(table_data)) # also load the source code of missing files... sources = {} - + print("") if not args.autolab: if len(report.individual_imports) > 0: print("By uploading the .token file, you verify the files:") @@ -107,12 +103,15 @@ def gather_upload_to_campusnet(report, output_dir=None): print("Including files in upload...") for k, m in enumerate(report.pack_imports): nimp, top_package = gather_imports(m) - report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) + _, report_relative_location, module_import = report._import_base_relative() + + # report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package) nimp['report_relative_location'] = report_relative_location + nimp['report_module_specification'] = module_import nimp['name'] = m.__name__ sources[k] = nimp # if len([k for k in nimp if k not in sources]) > 0: - print(f"*** {m.__name__}") + print(f" * {m.__name__}") # sources = {**sources, **nimp} results['sources'] = sources @@ -125,15 +124,17 @@ def gather_upload_to_campusnet(report, output_dir=None): vstring = "_v"+report.version if report.version is not None else "" token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring) - token = os.path.join(output_dir, token) + token = os.path.normpath(os.path.join(output_dir, token)) + + with open(token, 'wb') as f: pickle.dump(results, f) if not args.autolab: print(" ") - print("To get credit for your results, please upload the single file: ") + print("To get credit for your results, please upload the single unmodified file: ") print(">", token) - print("To campusnet without any modifications.") + # print("To campusnet without any modifications.") # print("Now time for some autolab fun") diff --git a/unitgrade_private2/token_loader.py b/src/unitgrade_private2/token_loader.py similarity index 99% rename from unitgrade_private2/token_loader.py rename to src/unitgrade_private2/token_loader.py index 2feb2a8d3a9ab0c2872a83c062ba7d50073c8920..a79f6ec5937a916c71b00ab53820441bcbbc0fe4 100644 --- a/unitgrade_private2/token_loader.py +++ b/src/unitgrade_private2/token_loader.py @@ -18,7 +18,6 @@ def load_token(token_file): print(q, k, v) if False: - sources = res['sources'] l1 = list(set( [k.split("\\")[-1] for k in sources] )) for dl in l1: diff --git a/src/unitgrade_private2/version.py b/src/unitgrade_private2/version.py new file mode 100644 index 0000000000000000000000000000000000000000..06fbe7e6baf05b88b04bb75858ca74be373cc8f8 --- /dev/null +++ b/src/unitgrade_private2/version.py @@ -0,0 +1 @@ +version = "0.0.1" \ No newline at end of file diff --git a/tutorial/ncode.py b/tutorial/ncode.py new file mode 100644 index 
0000000000000000000000000000000000000000..cb66a6159178a4e4892d22adf2fe1be4690a55ad --- /dev/null +++ b/tutorial/ncode.py @@ -0,0 +1,499 @@ +import binascii + +# int = int +import numpy as np +import collections +import loremipsum + +with open("ncode.py", 'r') as f: + s = f.read() + # s.splitlines() + for k, line in enumerate(s.splitlines()): + # l = line.strip() + l = line + if '=' in l and not l.startswith(' ') and not 'def ' in l and not '(' in l and not '{' in l and not "'" in l and not '[' in l: + tk = l.split("=") + if len(tk) == 2: + (a,b) = tk + if len(a) > 8 and not a.startswith("'") or a.startswith(' ') and not '(' in b: + a = a.strip() + b = b.strip() + if b not in ['False', '79']: + print(l) + s = s.replace(a, b + ' ') + s = s.replace(f"{b} = {b}", ' ') +# with open('ncode2.py', 'w') as f: +# f.write(s) + + +import math + + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKR(msg, pubkey): + bits = int( + math.log(pubkey[1], 256)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRqr = bits + 1 + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKq = '%%0%dx' % ( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRqr * 2,) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKr = msg.encode() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq = [] + for kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrK in range(0, + len( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKr), + bits): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKr[ + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrK:kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrK + bits] + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK += b'\x00' * ( + bits - len( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRr = int( + binascii.hexlify(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK), 16) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKR = pow( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRr, *pubkey) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKr = binascii.unhexlify(( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKq % kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKR).encode()) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq.append(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKr) + return b''.join(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq) + + +""" +submission_autograder.py: Local autograder client. +See README.md for a summary of how this program works. +Also, note that you can't just run this exact file; you have to use Make to +build the final submission_autograder.py file, then run that. +The build process (Makefile) #includes header.py and rsa.py here: +* header.py replaces the print statement with the Python 3 print() function. +* header.py replaces open with codecs.open; this must be done in header.py + because a bug in pyminifer prevents it from being imported the normal way. +* rsa.py imports binascii and math. +* rsa.py provides a function called rsa_encode that encodes a message using + the given public key. 
+""" +import base64 +import hashlib + +hashlib.sha256 = hashlib.sha256 +import json + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqTr = json.dumps +import logging + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq = logging.critical +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK = logging.debug +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrT = logging.CRITICAL +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqKr = logging.DEBUG +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqKT = logging.basicConfig +import os + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKrT = os.environ +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqr = os.listdir +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqT = os.getcwd +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr = os.path +import platform + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKrq = platform.uname +import re + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTq = re.match +import shutil + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTK = shutil.copyfile +import subprocess + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrqK = subprocess.PIPE +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrqT = subprocess.Popen +import sys + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRr = sys.argv +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK = sys.exit +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrKq = sys.stdout +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrKT = sys.executable +import tempfile + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTKR = tempfile.mkdtemp +import time + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTrK = time.gmtime +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTrR = time.strftime +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTKr = time.time +import urllib.request +import zipfile + +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqRTK = zipfile.ZipFile +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrR = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTKr() +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK = 'tutorial' +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKRq = { + 'tutorial': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/tutorial.zip', + 'search': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/search.zip', + 'multiagent': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/multiagent.zip', + 'reinforcement': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/reinforcement.zip', + 'bayesnets': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/bayesNets2.zip', + 'tracking': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/tracking.zip', + 'classification': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/classification_sp16.zip', + 'machinelearning': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/machinelearning.zip', } +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKRr = {'tutorial': ['shopSmart.py', 'buyLotsOfFruit.py', 'addition.py'], + 'search': ['searchAgents.py', 'search.py'], + 'multiagent': ['multiAgents.py'], + 'reinforcement': ['analysis.py', 'qlearningAgents.py', + 'valueIterationAgents.py'], + 'bayesnets': ['factorOperations.py', 'inference.py', + 'bayesAgents.py'], + 'tracking': ['bustersAgents.py', 'inference.py'], + 'classification': ['perceptron.py', 'answers.py', 'solvers.py', + 'search_hyperparams.py', 'features.py'], + 'machinelearning': ['nn.py', 'models.py'], } +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKqR = False +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKqr = '1.4.0' 
+kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKrR = 20000000 if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK == 'machinelearning' else 5000000 +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKrq = [kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrKT or 'python', + 'autograder.py', '--mute', '--no-graphics', '--edx-output'] +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq = 79 +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRK = '%A, %B %d, %Y, %H:%M:%S' +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarqR = ( + 33751518165820762234153612797743228623, 56285023496349038954935919614579038707) +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarqK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKRq[ + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK].replace('https://', 'http://') +kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarKR = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKRr[ + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK] + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq(s, width=kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq, + indent=0, right_margin=5): + print(' ' * indent + s + '.' * ( + width - len(s) - right_margin - indent), end='') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrKq.flush() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq(msg='DONE', indent=1): + print(' ' * indent + msg) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrKq.flush() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaK(file_path, block_size=65536): + if not kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.isfile(file_path): + return '(not file)' + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.getsize( + file_path) > kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKrR: + return '(file too big)' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRarq = hashlib.sha256() + with open(file_path, 'rb')as f: + for kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK in iter(lambda: f.read(block_size), b''): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRarq.update( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRK) + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRarq.hexdigest() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRqa(file_path, mode='r'): + if not kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.isfile(file_path): + return '(not file)' + with open(file_path, mode)as f: + return f.read() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRqK(): + print('-' * kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq, + end='\n\n') + print('CS 188 Local Submission Autograder') + print('Version ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKqr, + end='\n\n') + print('-' * kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq, + end='\n\n') + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKa(): + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKqR: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRarK = 'submission_autograder.log' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqKT(format='%(asctime)s - %(levelname)s - %(message)s', + level=kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqKr, + stream=open( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRarK, 'w')) + else: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqKT(format='\nERROR - %(message)s', + level=kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrT) + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKq(): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqaK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.dirname( + 
kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.realpath(__file__)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqar = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqT() + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqar != kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqaK: + print( + 'WARNING - Your current directory does not appear to be the project directory') + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqaR(): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq('Setting up environment') + try: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTKR( + prefix='cs188-') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK( + 'Temporary directory created at ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq() + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa + except Exception as e: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq( + 'Could not create temp directory: ' + str(e)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(104) + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqaK(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR, dest_dir): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq('Downloading autograder') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK( + 'Downloading from ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR) + try: + f = urllib.request.urlopen(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.join( + dest_dir, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.basename( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR)) + with open(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr, + 'wb')as kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqra: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqra.write(f.read()) + except Exception as e: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq( + 'Download failed: ' + str(e)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(101) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK( + 'Downloaded to ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq() + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqRa(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKq, dest_dir): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq('Extracting autograder') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK( + 'Extracting ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKq) + with kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqRTK(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKq)as f: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK = f.namelist() + if len(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK) == 0: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq('ZIP archive contains no files') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(102) + main = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.join(dest_dir, f.namelist()[0]) + try: + f.extractall(dest_dir) + except Exception as e: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq( + 'Extraction from zip file failed: ' + str(e)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(105) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqrK('Extracted inner directory ' + main) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq() + return main + 
+ +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqRK(dest_dir): + print('Preparing student files:') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKRr[ + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK] + for f in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq(f, width=40, indent=2) + if not kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.isfile(f): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq('Could not find required file: ' + f) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(201) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTK(f, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.join( + dest_dir, f)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq('OK') + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqKa(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK): + print('Running tests (this may take a while):') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra = '' + try: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqa = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrqT( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaKrq, + stdout=kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrqK, + cwd=kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK) + for kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr in iter( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqa.stdout.readline, b''): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr.decode() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar += kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr.strip() + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTq(r'Question q\d+$', + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr, + width=40, indent=2) + elif kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTq(r'### Question q\d+: \d+/\d+ ###', + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr.split(': ')[1].strip('#')) + elif '*** NOTE: Make sure to complete' in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq('skipped') + elif kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRrTq(r'Total: \d+/\d+$', + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra = \ + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr.split(': ')[1] + if 'ImportError' in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqr: + print( + '\nWARNING - Your code seems to have caused an ImportError') + print( + ' Make sure all of your code is in the files listed above') + print( + ' No additional files are allowed by the submission autograder') + except Exception as e: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTq( + 'Autograder invocation failed: ' + str(e)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTRK(103) + finally: + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqa.poll() is None: + try: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKqa.kill() + except OSError: + pass + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar, 
kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqKR(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKq('Generating submission token') + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKrq = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqr( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRraq = [kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaK( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.join(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK, k)) + for k in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKrq] + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRraK = [kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRqa( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKTr.join(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK, k)) + for k in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarKR] + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqa = {'project': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK, + 'local_time': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTrR( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRK), + 'gmt_time': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTrR( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRK, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTrK()), + 'duration_sec': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaqTKr() - kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrR, + 'score': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra, + 'raw_output': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar, + 'self_contents': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRqa( + __file__), + 'current_dir': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqT(), + 'current_dir_ls': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKqr( + '.'), + 'work_dir': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK, + 'work_dir_ls': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKrq, + 'work_dir_checksums': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRraq, + 'work_dir_student_files': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRraK, + 'env': str( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKrT), + 'os': kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRKrq()} + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqrK + '.token' + with open(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK, + 'wb')as f: + f.write(binascii.b2a_base64(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTraKR( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaRqTr(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqa), + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarqR))) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRaq() + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKaR(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK): + print('\n' + '-' * kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq, + end='\n\n') + print( + 'Final score: ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra) + print( + 'Token file: ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK, end='\n\n') + print( + 'Please make sure that this score matches the result produced by autograder.py.', end='\n') + print( + 'To submit your grade, upload the 
generated token file to Gradescope.', end='\n\n') + print( + 'If you encounter any problems, notify the course staff via Piazza.', end='\n\n') + print('-' * kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarRq) + + +def main(): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRqK() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKa() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKq() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKa = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqaR() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKq = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqaK( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTarqK, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKa) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqRa( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKq, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrKa) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqRK(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqKa( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrqKR( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRK, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKar, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKaR(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRKra, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRrqK) + + +if __name__ == '__main__': + main() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKRa(choices): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRr = sum( + w for c, w in choices) + r = np.uniform(0, kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaRr) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKR = 0 + for c, w in choices: + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKR + w >= r: + return c + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKR += w + assert False, "Shouldn't get here" + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKRq(p=0.5): + return np.random() < p + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKqa(value, p=0.5): + return value if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKRq( + p) else None + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKqR(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK, n, + nonempty=False): + if nonempty: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKr = np.randrange(1, n) + else: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKr = np.randrange(n) + return [kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK() for _ in + range(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqaKr)] + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRqK(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK, n, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarK): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarR = collections.OrderedDict() + while len(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarR) < n: + v = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarR[getattr(v, + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarK)] = v + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqarR.values() + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRqr(): + def 
kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRKq(): + def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRKr(w): + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKRq(0.9): + return w + return np.choice(['`{}`', '_{}_', '*{}*']).format(w) + + return ' '.join( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRKr(w) for w in loremipsum.get_sentence().split()) + + def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRrq(): + return '{0} {1}'.format('#' * np.randrange(1, 7), loremipsum.get_sentence()) + + def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRrK(): + return '```{0}```'.format( + '\n'.join(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKqR(loremipsum.get_sentence, 4))) + + def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTqRK(): + return '\n'.join( + '* ' + s for s in kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKqR(loremipsum.get_sentence, 4)) + + def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTqRr(): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK = kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKRa( + [(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRKq, 7), + (kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRrq, 1), + (kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTRrK, 1), + (kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTqRK, 1)]) + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTqRaK() + + return '\n\n'.join( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrKqR(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiaTqRr, 4, + nonempty=True)) diff --git a/tutorial/ncode2.py b/tutorial/ncode2.py new file mode 100644 index 0000000000000000000000000000000000000000..7f991cb00910a61a8268e80b6e84ef295333050b --- /dev/null +++ b/tutorial/ncode2.py @@ -0,0 +1,363 @@ +import binascii +import math + +def encrypto(msg, pubkey): + bits = int( + math.log(pubkey[1], 256)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRqr = bits + 1 + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKq = '%%0%dx' % ( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRqr * 2,) + msg_encode = msg.encode() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq = [] + for k in range(0, + len( + msg_encode), + bits): + msg_block = msg_encode[ + k:k + bits] + msg_block += b'\x00' * ( + bits - len( + msg_block)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRr = int( + binascii.hexlify(msg_block), 16) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKR = pow( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqRr, *pubkey) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKr = binascii.unhexlify(( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRKq % kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKR).encode()) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq.append(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaqKr) + return b''.join(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTaRrq) + + +""" +submission_autograder.py: Local autograder client. +See README.md for a summary of how this program works. +Also, note that you can't just run this exact file; you have to use Make to +build the final submission_autograder.py file, then run that. +The build process (Makefile) #includes header.py and rsa.py here: +* header.py replaces the print statement with the Python 3 print() function. +* header.py replaces open with codecs.open; this must be done in header.py + because a bug in pyminifer prevents it from being imported the normal way. +* rsa.py imports binascii and math. +* rsa.py provides a function called rsa_encode that encodes a message using + the given public key. 
+""" +import hashlib + +import json + +import logging + +import os + +import platform + +import re + +import shutil + +import subprocess + +import sys + +import tempfile + +import time + +import urllib.request +import zipfile + +start_time = time.time() +report_name = 'tutorial' +download_urls = { + 'tutorial': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/tutorial.zip', + 'search': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/search.zip', + 'multiagent': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/multiagent.zip', + 'reinforcement': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/reinforcement.zip', + 'bayesnets': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/bayesNets2.zip', + 'tracking': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/tracking.zip', + 'classification': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/classification_sp16.zip', + 'machinelearning': 'https://inst.eecs.berkeley.edu/~cs188/fa19/assets/files/machinelearning.zip', } +pack_files = {'tutorial': ['shopSmart.py', 'buyLotsOfFruit.py', 'addition.py'], + 'search': ['searchAgents.py', 'search.py'], + 'multiagent': ['multiAgents.py'], + 'reinforcement': ['analysis.py', 'qlearningAgents.py', + 'valueIterationAgents.py'], + 'bayesnets': ['factorOperations.py', 'inference.py', + 'bayesAgents.py'], + 'tracking': ['bustersAgents.py', 'inference.py'], + 'classification': ['perceptron.py', 'answers.py', 'solvers.py', + 'search_hyperparams.py', 'features.py'], + 'machinelearning': ['nn.py', 'models.py'], } +# False = False +version = '1.4.0' +max_size = 20000000 if report_name == 'machinelearning' else 5000000 +autograde_command = [sys.executable or 'python', + 'autograder.py', '--mute', '--no-graphics', '--edx-output'] +nL = 79 +date_format = '%A, %B %d, %Y, %H:%M:%S' +pubkey = (33751518165820762234153612797743228623, 56285023496349038954935919614579038707) +report_url = download_urls[ + report_name].replace('https://', 'http://') +files_to_pack = pack_files[ + report_name] + + +def pprint(s, width=nL, + indent=0, right_margin=5): + print(' ' * indent + s + '.' 
* ( + width - len(s) - right_margin - indent), end='') + sys.stdout.flush() + + +def post_token_generation(msg='DONE', indent=1): + print(' ' * indent + msg) + sys.stdout.flush() + + +def sha_get_checksum(file_path, block_size=65536): + if not os.path.isfile(file_path): + return '(not file)' + if os.path.getsize( + file_path) > max_size: + return '(file too big)' + sha = hashlib.sha256() + with open(file_path, 'rb')as f: + for block in iter(lambda: f.read(block_size), b''): + sha.update(block) + return sha.hexdigest() + + +def load_file(file_path, mode='r'): + if not os.path.isfile(file_path): + return '(not file)' + with open(file_path, mode)as f: + return f.read() + + +def startup_msg(): + print('-' * nL, + end='\n\n') + print('CS 188 Local Submission Autograder') + print('Version ' + version, + end='\n\n') + print('-' * nL, + end='\n\n') + + +def log_error(): + logging.basicConfig(format='\nERROR - %(message)s', + level=logging.CRITICAL) + + +def kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKq(): + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqaK = os.path.dirname( + os.path.realpath(__file__)) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqar = os.getcwd() + if kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqar != kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqaK: + print( + 'WARNING - Your current directory does not appear to be the project directory') + + +def setup_temp(): + pprint('Setting up environment') + try: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa = tempfile.mkdtemp( + prefix='cs188-') + logging.debug( + 'Temporary directory created at ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa) + post_token_generation() + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKa + except Exception as e: + logging.critical( + 'Could not create temp directory: ' + str(e)) + sys.exit(104) + + +def download_autograder(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR, dest_dir): + pprint('Downloading autograder') + logging.debug( + 'Downloading from ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR) + try: + f = urllib.request.urlopen(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR) + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr = os.path.join( + dest_dir, os.path.basename( + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTKraR)) + with open(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr, + 'wb')as kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqra: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqra.write(f.read()) + except Exception as e: + logging.critical( + 'Download failed: ' + str(e)) + sys.exit(101) + logging.debug( + 'Downloaded to ' + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr) + post_token_generation() + return kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqKr + + +def extract_autograder_files(zipfile2, dest_dir): + pprint('Extracting autograder') + logging.debug( + 'Extracting ' + zipfile2) + with zipfile.ZipFile(zipfile2)as f: + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK = f.namelist() + if len(kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTRqrK) == 0: + logging.critical('ZIP archive contains no files') + sys.exit(102) + main = os.path.join(dest_dir, f.namelist()[0]) + try: + f.extractall(dest_dir) + except Exception as e: + logging.critical( + 'Extraction from zip file failed: ' + str(e)) + sys.exit(105) + logging.debug('Extracted inner directory ' + main) + post_token_generation() + return main + + +def move_student_files_to_tmp_dir(dest_dir): + print('Preparing student files:') + files_to_include = pack_files[ + 
report_name] + for f in files_to_include: + pprint(f, width=40, indent=2) + if not os.path.isfile(f): + logging.critical('Could not find required file: ' + f) + sys.exit(201) + shutil.copyfile(f, os.path.join(dest_dir, f)) + post_token_generation('OK')
+ + +def run_autograder(work_dir): + print('Running tests (this may take a while):') + output = score = '' + try: + proc = subprocess.Popen(autograde_command, stdout=subprocess.PIPE, cwd=work_dir) + # Stream the autograder's stdout, echoing a compact per-question summary. + for line in iter(proc.stdout.readline, b''): + line = line.decode() + output += line + line = line.strip() + if re.match(r'Question q\d+$', line): + pprint(line, width=40, indent=2) + elif re.match(r'### Question q\d+: \d+/\d+ ###', line): + post_token_generation(line.split(': ')[1].strip('#')) + elif '*** NOTE: Make sure to complete' in line: + post_token_generation('skipped') + elif re.match(r'Total: \d+/\d+$', line): + score = line.split(': ')[1] + if 'ImportError' in line: + print('\nWARNING - Your code seems to have caused an ImportError') + print(' Make sure all of your code is in the files listed above') + print(' No additional files are allowed by the submission autograder') + except Exception as e: + logging.critical('Autograder invocation failed: ' + str(e)) + sys.exit(103) + finally: + if proc.poll() is None: + try: + proc.kill() + except OSError: + pass + return output, score
+ + +def make_token_file(tmp_dir_name, raw_output, score): + pprint('Generating submission token') + dir_contents = os.listdir(tmp_dir_name) + checksums = [sha_get_checksum(os.path.join(tmp_dir_name, k)) for k in dir_contents] + file_to_pack_as_str = [load_file(os.path.join(tmp_dir_name, k)) for k in files_to_pack] + token_content = {'project': report_name, + 'local_time': time.strftime(date_format), + 'gmt_time': time.strftime(date_format, time.gmtime()), + 'duration_sec': time.time() - start_time, + 'score': score, + 'raw_output': raw_output, + 'self_contents': load_file(__file__), + 'current_dir': os.getcwd(), + 'current_dir_ls': os.listdir('.'), + 'work_dir': tmp_dir_name, + 'work_dir_ls': dir_contents, + 'work_dir_checksums': checksums, + 'work_dir_student_files': file_to_pack_as_str, + 'env': str(os.environ), + 'os': platform.uname()} + tokenfile = report_name + '.token' + with open(tokenfile, 'wb') as f:
f.write(binascii.b2a_base64(encrypto( + json.dumps(token_content), + pubkey))) + # env = binascii.b2a_base64(encrypto( + # json.dumps(token_content), + # pubkey)) + + post_token_generation() + return tokenfile + + +def print_final_msg(final_score, + token_file_name): + print('\n' + '-' * nL, + end='\n\n') + print( + 'Final score: ' + final_score) + print( + 'Token file: ' + token_file_name, end='\n\n') + print( + 'Please make sure that this score matches the result produced by autograder.py.', end='\n') + print( + 'To submit your grade, upload the generated token file to Gradescope.', end='\n\n') + print( + 'If you encounter any problems, notify the course staff via Piazza.', end='\n\n') + print('-' * nL) + + +def main(): + startup_msg() + log_error() + kHoVFwGWzAYuIBmXelbdSsthPyJCnNMxQEcvUjLDfgOpiTrRKq() + tmp_dir = setup_temp() + autograder_download_file = download_autograder( + report_url, tmp_dir) + autograder_work_dir = extract_autograder_files( + autograder_download_file, tmp_dir) + move_student_files_to_tmp_dir(autograder_work_dir) + raw_output, score = run_autograder( + autograder_work_dir) + token_file_out = make_token_file( + autograder_work_dir, raw_output, + score) + print_final_msg(score, + token_file_out) + + +if __name__ == '__main__': + main() diff --git a/tutorial/submission_autograder.py b/tutorial/submission_autograder.py index e0489a116d26fb92fd23b969100c7f569c99c022..da338a5a59fcbf090f430ac1cc74c14286540f6b 100644 --- a/tutorial/submission_autograder.py +++ b/tutorial/submission_autograder.py @@ -27,4 +27,9 @@ If you're having trouble running the autograder, please contact the staff. """ import bz2, base64 exec(bz2.decompress(base64.b64decode('QlpoOTFBWSZTWVRFoVkAPC3fgHkQfv///3////7////7YB1cF9mblww4O3bKB6be3GwUsMhvj4BewAGdxx2NADTwgAdHQGACAJU0PTgFAQp6Cts0BXcNNIIRgJp6RGTSn4p4mqfoSZiRtonqh6EGyIGhvSmgNNBNBNBJqaY0ST1Hqepo2o9qjymmRoZGgZAAA0A1T0yinqZqGBGENGABMCaBoyZGAAAmQMJNFIlMgqfpI/RpqQAZAHpqeoBoZqAaAAPSeoMIpEQo/VHqenqNNGo9R6j1DyQGhk00M1AeoaA9QNqeoAJEggAgRimJgmgJ6Uepp6p6aPVPJPKDQBoDTIep3Ie6J+YT2jGf1sL9LUX/dCoMfwv16VRUQYgsFZ1az8CWPhnucMYMif3tfQkKwPwJ6vKziVKeGpHN/+vEB1hx43828SKCMVSMEARWCQY9WZOP1M6frSidsR/OXMP+t8P4r9P1Unxer278Px2CgaQEkOvZVgufvgJ+RnO/FhBtMzj5qVdd/iuWjfTXVTu7vjJvyqRtx/ornOsSiR/DxnI77YVmxx6OKFw9PskIXvSJjIBQVjFGRRVFixURVigCqKiCwURkWKs8fP8Pon0T7/t9Yzz/TPqoWniWGrgZGQOqMSNSkwA5Mg8G0HulmY3tnCz7F6iuW0PFKVPqDF02TsDMsHPPfG1oGO9HPeYHqsgl4uBWXa6LokmH/KcEl7Mb6YuWGVlFoNsYmxsIlM+rSVugWrrsBXXF5miHB1LgG+zybjR2bimzLhec+lwMEEkknQazjq8+HO654m3cI43zqSPfVToVIKFCNqbiOGhwPVg5BuNIJH8FdhThXCWpLMNqY3rlEpqCcFSrTHW5qPDrc4U4xuLs21bx2o90MziHXVSs3KcLEGsdqMzbQZg7ZlTNVOFlWIWuW67TMpHk1MUFGyVWEVVVV4cDnAiyKhPYBWtAJYvMoh+68tMhbv2bqJoogMg+qOc5p/C42v8dcRIh8gcXo4mm6CCCQUQlCQCUbsBrOL5xeO5VZS7vxCLlVoqW6oXA1OKGOKfl8mBNTsbFpW44TOLbXeZq3NY4vw6iSL9pruVY29iHT1AVHD3LvqFXrGgcT1UFeUMRU3WsYNII6/sw6HnxVaH82KoJckheDRWP8KWHqHykCtaxIo71q8/ZM9QBCs2NBYiLcxs+nyd2OGiW6Lt2I2JbhialQF2FDUAkITZfpv0M4IYoVu8ZBMfKTq8rDSoaSSChBktGpWbbU66fXgNmM1ibENJ4UK64GYgQkIIxPiObGwcM0dV8cy1MVyq5zm8T0XCpI2+dw5kzgZHr8oNk4EOZG6RCwiGxKKQm5uG0o7s4/X6/gb0f89vAArxptndSQhB2IdlRCf+qQocl+QBjOQ6YtG9d7iBxHq2l2+QxuxoFYR2afRYsjuldNdEl+eTXm2mQ22CcQiHJvC0ucz0vSfdvZvg6m0kU8rIroQUlV8w8XAqUEZWwMQ7xfxRwS+qXIDGgZsaMFDOzFQVVu/TFBFVsWWGE0q0QNDKkTlvjhIDEYaR+jWDF4XQYYcE0AXBlfSUs5rQcB46XmAQsC3JoVRitDaRmosxDkUwFDdy9++L4i9cDEYVmdqQIFQzIVE5RRItWbPULbI7LW7RcGjUrV7ZUEYAXMYbHAXFAr8pdKhU5JznSK8ajlkXw/2/Z+qnWmVfovyvXWmtRYUeeFgzb8xZPCPlDWMS2yhXQYp5Wk/FZgMK5apTUuShcMOjs6ou66EYVxmIvELPzH3jzhEVEofO1os2SGEaccyLjlBEp+YCqhKDZZsoKFAu41ghBfBBMnRIUNKd7prTlQYy7cj2+LeHavr1v9kvi4jp4Odo7tT
WTUb7Th+ziRffrNO/Ai3PjUeflAmunATNLTulDlLclt8UASP0Sn3TN1jjk0YX7JHtGK9z525Untr4PvnRUmX8YnSABEg6HZovyG0sikZBoO1lTAmG0cznD12wuQBMx/UCMYYaCiaTrQ0jQwLMqvsLwOMoGm0gMLvq8Y91+sXI8hEtjCGPMh76SUmoc6wR6O6s9eqK6xDNrUO8lPRioT0PYQYUZHzVqUt5mxsMBvQcUUPjmkvRNl97smcrGlXg7wFGICyrCkYV6biAtHNig+nYQjAKkXEhwtfV77iKgkovpUUsLaSJvO2Xew3HGBr18eEsc4KxUSubsDKn1Q5kHKxEzjt582ATAV+rNBuChw3NzbIjZ6u4f6QNpT05z2NBVK6/s6w8mu2ICBnaWK+1Bidr2NuKVOxU/GJTA6EBr9t4Ebd7syB3NyrkgqBOmOCy7aWIpM4ARXzRjFkV18ZixnGbC9psaNkrsONZmH3SjQags0YVkmmsdlYopNH6xzd7mypoZNt6JoDFX0z8XwBD3zInfpnKhCDTbZZOKJ1fFTdpnEVHmvMhsduqB2ogrl7B1AC1OQAEARIDO3ZGbXw7z9oHcv1/s8pfLziGl0Op650nmQUBh+kHAya91giE/dcQQ94zD7aXfRfzgcS+zGK5DbkmLpCtIJJJ5wUGUF9Nb9uNvTwoBkAEPCWwA2BJhy2TUfgwsPrXuPltrXtiOnjqnK3bk3qWPWrXPPyDawwKC7XUkEEcCnTdbG1Q4kSRUjXlelzppkW2bWXlWUz2c74mPsDa2+BNcPWbayORQC7uKtoUAW/AYALvJq6v9f7h8SD6lYhO0jq5RQv6ZVV2ZyjfDV2pDklHxYSylWSK/ogQeDEiyBr2K4+vy+P7/97tb0HsgRAG9o1YlnnYZ9f1+dvzvq+n9GE7Gld59MZbniW4SlfE4BvYtivAjhyCJ9ozO9/k1+p1atcH4a5IY9hFKKj/IXU4A024KtQohKYpWlr1jmVJ/dcULnlFDBX2X0eR1SzrLPpF4ldxKqjhQS6myqrAnyKawLk1+9Iq/mNCsjQGmh6gTpbGVjBoCKLj2q1FJMKrHKW8eKxzblyNlv3GT0ZOjPn/HeNoARECCfHn4Oxvdr0Tj0UTy+wCommO9YPBllxgKiem1MseiRnAVExyD4aN3Jk49nSAqJxdo3qgjvy6v5AVE2bZtAKiU3edvAKiTfYAqJXZP3d1rByDc3QFRNvbTu/Ww+fhy+rc42pygbm5i53LThzhM5oixtW2tW04mxUsQU0oQ0ZA1zUUbePB5eFrbjleauU4bbC25lBCwLAUcJgUWWUNycLW22SSyMEItqDRllEtpZpsJZIMi8ByNmpkG0EQxZDTQQoRiIZAULJxjYCatKOxR1dUTCUjJhZFixQoRMJYoc/+gKiVVGKqvBppM1nVAVEuYy2YQFRLPYu9/pvxtlhZw8wEkI9eQ8b4fxASQiX3AJIRKvj2/IST3/vc+Bc7A4S6NS4S7FVHZF1GDzlR15jjtavHblvj6OsvUlOwVwso22tBFlprChDFmIbVaCREkdJS2MWyRRfsn2P2vI8D4PADENgxVoq0KMQKTxARFlKESLxFqYMAkVhspiXSUiolDkc5wsmstztdZWsqjZKLRk0phZdWrQrGJ6fL5U9W9X3KQ+T7f64EkIfN36vWM9gJ7XaswiWrmtvp92501LT54HxCIySYpZIIyXC4wAjJ4JJ0PAMQ4IQRABBkEYFKUIiEgev2IdE7i+Ds7kEYIhEZAKUsRIch44UI2jR4cICMhZSkiMgsyRIIIAv2dHCAqJ8gFRJ0YZaAFRMoCol6svgKiU4MSZ4iI1YSWWDhorkYzQ6CvSDamqUMNQdSC8JQSCQE7Lg/SV8itLz7F72QtgwpdCTcaSlR2GK/b9we/T8y2ltlA8UOq6MkfZcv0vjH6gGN1uyJ/i/P4x7zFYEot9ZBdMuxJfZalpVGZhKV10G3f/sOIHyrnXgS64XPosOso6xYOQ5jDES84WqZC5bD6eFHcvyx59/0Ae/e99AxH0I+LsagjkaltGfPCyIWCCcMpUKqWCJEfYEOCREUkxQkxAwQBOiImzUlJkKNHqq81u4X7qdYLTnDshHju+o7Ez3IOeNk2goNjFBGKwyFojYnJGCcz8Ia5LsUrJNHzdaTJ5aAUwRScVx/8/OEjUnwkASst2kEhARBDw05egTKQREJg1DAiEB+B8UJBGQ3I4mICMhMEKFFDEvQZNExVgIiEyhKShNil30Febx4TmiiqFZq+LA9qNEXHsv1OpR2kYbr/gBCuXWE4diyQNn62X94PVh6JAF5jVgMrmmN1skY4fY11HOIzLrz25G0NDmYl9bX7/j99hagHwuVxnI06GZgJIRpUJUAhYLBY+MccYEptcLD53ntJGgXxrZxkSUyxkjeRNVngehjUwUNEBjYuXrFFSquDCYsfwlUJ7kBjtdfYlferVow3bDp+297tJEWsh9GhJgProEbVh/GWtjRwvnNVULcEtqvQa/kA42ucGDL25kQumJIvsMJfQBJCNsuyaLH1O/rTIicLJwuyjbPyF9tUE3SqkJNSAbLolOXRAVEtJKxPFsN9vXm5xb+BKKS8gVKdBYiY48QSmMYxjAlmBzNiTqkHfurOd+Yw1p4ZYWMGw51Qs0UuOBYHKho7V32TAOtnz2BlANsyZ6RyHhqCefufGwdI1hGP1qrtYP6wfcuoF79doNaAOy2Tm8LZ5e65aj5VlT0aEXDFUkIhASske61C7Q7O0qmZToBVbOLCodzIrTa/npYCDoypUFEDjtKFDEEh7mui0xLCBOrTkZgOQosFStWwGs5imlIXyyntslINwEfgSttR2aze7xpKrteUlAUp1agYoOaDu90ANBliJI53Bt7eJ+QCSEcwpysEXehaFuH1E2wgD06dnb1dPNaIgPPYumXP0D3Gy+chm6nOxGB46m4tpqg6Pupbw6R4VkOSiTUokEiZEEZI1vUwsFHRlfaLgA1Qv3vtxgXYUk5CGiIoS2UHwKWGbD7+tBW/Zdf/YSJ+AjA4t+RsNJBTWZFmzY6kmKjx4NR9DFlAtFYgLrLYsSZK6Yi6QBeWAfjTwsAC21QX9As+kYynWmgdCOa5brZC7GhoTYmxpgJoJ/hSD6vT3q3zlEv6Iq0P8ce/zAdE8uRYHtFmHzASQigU9PgXGfWt8l66EKDIqltmp8dQJTn5lETTutIJe5MGxDGDyXdoOiDEquvCCSqrNsR2OYFQKj4ofF1agLFPcBJCD7kAYO6F+InWWKXNl4pl4+4XCLNpZICViU2jgM+rbebvUwGbSEiBpsBJCHF2cwgfwYiAoRBGRt5e0mFVYyLkwRBD9XG/ebUilTTcifKVAWRkkCrN1GGJIi5sKWqKb7rgm15eynhxSNBF+fSGHRmG/5MXcNEm2g53LWCZnd8Kbk7AMEq9qbO8t+jW9gNiCaaA97SqCDUA0El0guo4q2+2b1R/GMEd//R4hwNQfaetcMd3QNomzplxciJPIcVGNMlKHuv4b/IDTO3yZkXItjRNDabYgY2NN
D9iCh+6DOxGqQeh+k6eGPZCMdfJ96tuMBcEfXhIkDAeUBsLd3elynngXTtOGrltxtsakf2gJIR3GYLTRyz8InOF1SWi/vs+Nhknx7IJMvNYnE4OmK6UxjG2NdfmpsjbLxvE+Z4J917e3kuRdDZ7e8dHfZ09IU677vFhkbcyiCnB5pelThuYr9vtS3j/g9Lxeu3bu08eWx/C1GIvfXo3c+1jdl+0Hi583efZ1rBUbb6HvXcLWa8NODOUPJDcUtBVXiVKMQYCNWhSdPqQWdu6pOPXZY8ztPGu4bONPJ4zlaR5VQVGTy5sFpycvlW3Oa56nxcy4eK50HPm4VY5W8XUWFW20pVVVURSTIL5iQjacSo+YeR5bgPpvfWxxeClvJQ26RNJ2ujmOToK6G4dXrsBnP1CkI3B7Cx63ZTxi9dNtzki/za5DCGoEDUP2QQMgiT80X1CAoAkhHUWDejQi5pEw+/4xgTVdYU2hXaGMxE33WjtyU14nhd4eeVTo6NBb1owN0EC3yjgRC23d1rR78NNxBlmjGkS4GBU1MmZrOTA+WsAbEQHCgeosvkaVmS0cKZpRESn9gCSEbV58sz2M2b74Pl6ZJHCi01jOb3hKIL4UNpA0mAyOLGmw237pdFbekwMOGJL0L64qGaOYGYDFdpeg6bR+FoTeIB8xkrJg2aeWW98TBc93ldgK+8KgnpaPrLeDtFRGMVURGIIyZxfXncOVPuwKxPj8eA6QOfY9ibQRxf/1YJjY7vxASQh4MCcxMLzHXrut6s5ztX3nu84dALDyh55xRgekpDQCkpFaTJesZNpCKIDrskyzJEviKziBrOzp6X0DDaMDYvDZbgmI1ZIbAbAL2KbQwhqNWLQsMUnUfDFiK4nvy1qM4Comwuv0sxM3KAk6gqDbcvSbzjwOi3BtizBWnrBH3goO+y6L7NrPypZ8NY6FwhS2PVpEp8TVoGxIiUJEw6bP1HFeNlisRJmgXx8fDd08aX+dGgUFxdumqhah21D3eoVQ0aRaCKIw6Mg7M62pHJk/gtps1LZ/yHOYEE1aeWOaKUCATtkBTZnHJiRMhAzrhXW52ZUo5ONOk0X0Stm4aH93Zyx2N++gCSEZI7e3eVYsdJCRO257rZtc2xhk7LrOAi5GIFF/N0Y3WGSAP2PdPKpj/X4Ai1tL/V8LgbbtaVk2rU1xyO3Lrc2x0FylG6XamTWYtxxdzcLzW2g6W22sakgWVyaoIIIOjCAmE0VzUq1FAZjgDhG0pXYoVIxkRWFMYiObJSiMQZTLw5JEYPgxZwFoIMp0BTFhEyA0njdgnDg2ZjWoIxQYsUqMMJhRC0rLNbaSosTFCSIJCuEaFr+yr2SzMkouCCNqcRUwKEFKKReJa32ND1t8jh35i9Hoxk9ZqbCIleYsvfWPJe7rMvJuc5nVccLCnA4U5rkO16tavO3kzDc4UDYn5D0el8dHJ4fF5n1uTsZ307IaGpRtvKtG0K2lQwOKSbCxURpEBkPT1OY6qmqC00cyIlBLqOCOgi2CHTOK1asYUTngPPxZZ133ZRhUoMqDhuxA5IacClkB8gD8GX6oDPtAJ3WtO5JBlktHsGSTwS5bKypBSMYgyRp4i2ip1Yn5i1EzoD4XrMY6ZQkl3O/nnXUGGHlSU5lEyqg95sqigh4JgDADZai0Rd5e9UwexYcrceXJny0qp6xLnOwwwyk2VoYsUaHsD+lVDcEgIDHYEMFc0087fwtJlG3URVEeOUR6nP9pVRYGDD7LqX0rWJs4QoTPJm+2y8Z7aUZtVkwkEKvpUBvKRjYb1AFlEKGui3m2TcEVZZdJSAYrkEPwkZiIpIcQqOxkYY3rCoSFolZr3P0OegdA4tGMZAQqruZ7crvWO6+t9iJhjfPvQEgNFwCpOh+q6/1TxvSLcITGgG1hw036LHcbo+Y52mwWdgNiYIkoQDTeREZRHACANhddylW82Rf9oCSEY3mG6y3oTJ0vyycDf4StapVXsKnuQNsUwXA6xtvA28ljykrHSjBWWd6ZOJouCTYcHe7Wr2RF0pTYsMHQq0yhNyA6bwmgtAd33zk6EOmcs6N1InpT5ik7mrcAWkDrMb65e82BVgGhny5KkQaZUJ32HFcEfRhDz5sH1Y5BS7AEWpMDBjVQEkImgOLXUW2YBmonsocvK3h1G/gHBpjQWINJmzfuPfzj0e3Gvsumt5d65Ogj5JyGBh5SFO7yOziiaWDtQ9+RmNZMRCB7okPmka2JyACeKxwXAuVvFB8gEkIwqHooAUz2LlqShTPMRGsH+Y/zg6yzXVh1Axe54Ow9FUNe205dyicpSRbatlOyyQAdgWAU6h07Y3j18d9vEZfcV6UYcjKP2MG0QFpetOi3Xs7NmgCSEOc9MfS8Y6k3vt73TEo5Zzw8ScsWE16/YAkhGVamhYLlMtuUJTFjYEtTmfTb+4BJCMrRfBpMOrtO6OHOSknKFZSnXCTwkMFnW5DQGFZ70+y6n3AJIQ/U7ttFU2I9PSiZhdWGa/aYn7wEkIhSRwMGA+ucUCV4b1kDABEe8r0HKfAKU3enyR3HdGUe9Pf/R+B+N/fXEkS4YMJDbgD/4u5IpwoSCoi0KyA'))) +s = 
bz2.decompress(base64.b64decode('QlpoOTFBWSZTWVRFoVkAPC3fgHkQfv///3////7////7YB1cF9mblww4O3bKB6be3GwUsMhvj4BewAGdxx2NADTwgAdHQGACAJU0PTgFAQp6Cts0BXcNNIIRgJp6RGTSn4p4mqfoSZiRtonqh6EGyIGhvSmgNNBNBNBJqaY0ST1Hqepo2o9qjymmRoZGgZAAA0A1T0yinqZqGBGENGABMCaBoyZGAAAmQMJNFIlMgqfpI/RpqQAZAHpqeoBoZqAaAAPSeoMIpEQo/VHqenqNNGo9R6j1DyQGhk00M1AeoaA9QNqeoAJEggAgRimJgmgJ6Uepp6p6aPVPJPKDQBoDTIep3Ie6J+YT2jGf1sL9LUX/dCoMfwv16VRUQYgsFZ1az8CWPhnucMYMif3tfQkKwPwJ6vKziVKeGpHN/+vEB1hx43828SKCMVSMEARWCQY9WZOP1M6frSidsR/OXMP+t8P4r9P1Unxer278Px2CgaQEkOvZVgufvgJ+RnO/FhBtMzj5qVdd/iuWjfTXVTu7vjJvyqRtx/ornOsSiR/DxnI77YVmxx6OKFw9PskIXvSJjIBQVjFGRRVFixURVigCqKiCwURkWKs8fP8Pon0T7/t9Yzz/TPqoWniWGrgZGQOqMSNSkwA5Mg8G0HulmY3tnCz7F6iuW0PFKVPqDF02TsDMsHPPfG1oGO9HPeYHqsgl4uBWXa6LokmH/KcEl7Mb6YuWGVlFoNsYmxsIlM+rSVugWrrsBXXF5miHB1LgG+zybjR2bimzLhec+lwMEEkknQazjq8+HO654m3cI43zqSPfVToVIKFCNqbiOGhwPVg5BuNIJH8FdhThXCWpLMNqY3rlEpqCcFSrTHW5qPDrc4U4xuLs21bx2o90MziHXVSs3KcLEGsdqMzbQZg7ZlTNVOFlWIWuW67TMpHk1MUFGyVWEVVVV4cDnAiyKhPYBWtAJYvMoh+68tMhbv2bqJoogMg+qOc5p/C42v8dcRIh8gcXo4mm6CCCQUQlCQCUbsBrOL5xeO5VZS7vxCLlVoqW6oXA1OKGOKfl8mBNTsbFpW44TOLbXeZq3NY4vw6iSL9pruVY29iHT1AVHD3LvqFXrGgcT1UFeUMRU3WsYNII6/sw6HnxVaH82KoJckheDRWP8KWHqHykCtaxIo71q8/ZM9QBCs2NBYiLcxs+nyd2OGiW6Lt2I2JbhialQF2FDUAkITZfpv0M4IYoVu8ZBMfKTq8rDSoaSSChBktGpWbbU66fXgNmM1ibENJ4UK64GYgQkIIxPiObGwcM0dV8cy1MVyq5zm8T0XCpI2+dw5kzgZHr8oNk4EOZG6RCwiGxKKQm5uG0o7s4/X6/gb0f89vAArxptndSQhB2IdlRCf+qQocl+QBjOQ6YtG9d7iBxHq2l2+QxuxoFYR2afRYsjuldNdEl+eTXm2mQ22CcQiHJvC0ucz0vSfdvZvg6m0kU8rIroQUlV8w8XAqUEZWwMQ7xfxRwS+qXIDGgZsaMFDOzFQVVu/TFBFVsWWGE0q0QNDKkTlvjhIDEYaR+jWDF4XQYYcE0AXBlfSUs5rQcB46XmAQsC3JoVRitDaRmosxDkUwFDdy9++L4i9cDEYVmdqQIFQzIVE5RRItWbPULbI7LW7RcGjUrV7ZUEYAXMYbHAXFAr8pdKhU5JznSK8ajlkXw/2/Z+qnWmVfovyvXWmtRYUeeFgzb8xZPCPlDWMS2yhXQYp5Wk/FZgMK5apTUuShcMOjs6ou66EYVxmIvELPzH3jzhEVEofO1os2SGEaccyLjlBEp+YCqhKDZZsoKFAu41ghBfBBMnRIUNKd7prTlQYy7cj2+LeHavr1v9kvi4jp4Odo7tTWTUb7Th+ziRffrNO/Ai3PjUeflAmunATNLTulDlLclt8UASP0Sn3TN1jjk0YX7JHtGK9z525Untr4PvnRUmX8YnSABEg6HZovyG0sikZBoO1lTAmG0cznD12wuQBMx/UCMYYaCiaTrQ0jQwLMqvsLwOMoGm0gMLvq8Y91+sXI8hEtjCGPMh76SUmoc6wR6O6s9eqK6xDNrUO8lPRioT0PYQYUZHzVqUt5mxsMBvQcUUPjmkvRNl97smcrGlXg7wFGICyrCkYV6biAtHNig+nYQjAKkXEhwtfV77iKgkovpUUsLaSJvO2Xew3HGBr18eEsc4KxUSubsDKn1Q5kHKxEzjt582ATAV+rNBuChw3NzbIjZ6u4f6QNpT05z2NBVK6/s6w8mu2ICBnaWK+1Bidr2NuKVOxU/GJTA6EBr9t4Ebd7syB3NyrkgqBOmOCy7aWIpM4ARXzRjFkV18ZixnGbC9psaNkrsONZmH3SjQags0YVkmmsdlYopNH6xzd7mypoZNt6JoDFX0z8XwBD3zInfpnKhCDTbZZOKJ1fFTdpnEVHmvMhsduqB2ogrl7B1AC1OQAEARIDO3ZGbXw7z9oHcv1/s8pfLziGl0Op650nmQUBh+kHAya91giE/dcQQ94zD7aXfRfzgcS+zGK5DbkmLpCtIJJJ5wUGUF9Nb9uNvTwoBkAEPCWwA2BJhy2TUfgwsPrXuPltrXtiOnjqnK3bk3qWPWrXPPyDawwKC7XUkEEcCnTdbG1Q4kSRUjXlelzppkW2bWXlWUz2c74mPsDa2+BNcPWbayORQC7uKtoUAW/AYALvJq6v9f7h8SD6lYhO0jq5RQv6ZVV2ZyjfDV2pDklHxYSylWSK/ogQeDEiyBr2K4+vy+P7/97tb0HsgRAG9o1YlnnYZ9f1+dvzvq+n9GE7Gld59MZbniW4SlfE4BvYtivAjhyCJ9ozO9/k1+p1atcH4a5IY9hFKKj/IXU4A024KtQohKYpWlr1jmVJ/dcULnlFDBX2X0eR1SzrLPpF4ldxKqjhQS6myqrAnyKawLk1+9Iq/mNCsjQGmh6gTpbGVjBoCKLj2q1FJMKrHKW8eKxzblyNlv3GT0ZOjPn/HeNoARECCfHn4Oxvdr0Tj0UTy+wCommO9YPBllxgKiem1MseiRnAVExyD4aN3Jk49nSAqJxdo3qgjvy6v5AVE2bZtAKiU3edvAKiTfYAqJXZP3d1rByDc3QFRNvbTu/Ww+fhy+rc42pygbm5i53LThzhM5oixtW2tW04mxUsQU0oQ0ZA1zUUbePB5eFrbjleauU4bbC25lBCwLAUcJgUWWUNycLW22SSyMEItqDRllEtpZpsJZIMi8ByNmpkG0EQxZDTQQoRiIZAULJxjYCatKOxR1dUTCUjJhZFixQoRMJYoc/+gKiVVGKqvBppM1nVAVEuYy2YQFRLPYu9/pvxtlhZw8wEkI9eQ8b4fxASQiX3AJIRKvj2/IST3/vc+Bc7A4S6NS4S7FVHZF1GDzlR15jjtavHblvj6OsvUlOwVwso22tBFlprChDFmIbVaCREkdJS2MWyRRfsn2P2vI8D4PADENgxVoq0KMQKTxARFlKESLxFqYMAkVhspiXSUiolDkc5wsmstztdZWsqjZKLRk0phZdWrQrGJ6fL5U9
W9X3KQ+T7f64EkIfN36vWM9gJ7XaswiWrmtvp92501LT54HxCIySYpZIIyXC4wAjJ4JJ0PAMQ4IQRABBkEYFKUIiEgev2IdE7i+Ds7kEYIhEZAKUsRIch44UI2jR4cICMhZSkiMgsyRIIIAv2dHCAqJ8gFRJ0YZaAFRMoCol6svgKiU4MSZ4iI1YSWWDhorkYzQ6CvSDamqUMNQdSC8JQSCQE7Lg/SV8itLz7F72QtgwpdCTcaSlR2GK/b9we/T8y2ltlA8UOq6MkfZcv0vjH6gGN1uyJ/i/P4x7zFYEot9ZBdMuxJfZalpVGZhKV10G3f/sOIHyrnXgS64XPosOso6xYOQ5jDES84WqZC5bD6eFHcvyx59/0Ae/e99AxH0I+LsagjkaltGfPCyIWCCcMpUKqWCJEfYEOCREUkxQkxAwQBOiImzUlJkKNHqq81u4X7qdYLTnDshHju+o7Ez3IOeNk2goNjFBGKwyFojYnJGCcz8Ia5LsUrJNHzdaTJ5aAUwRScVx/8/OEjUnwkASst2kEhARBDw05egTKQREJg1DAiEB+B8UJBGQ3I4mICMhMEKFFDEvQZNExVgIiEyhKShNil30Febx4TmiiqFZq+LA9qNEXHsv1OpR2kYbr/gBCuXWE4diyQNn62X94PVh6JAF5jVgMrmmN1skY4fY11HOIzLrz25G0NDmYl9bX7/j99hagHwuVxnI06GZgJIRpUJUAhYLBY+MccYEptcLD53ntJGgXxrZxkSUyxkjeRNVngehjUwUNEBjYuXrFFSquDCYsfwlUJ7kBjtdfYlferVow3bDp+297tJEWsh9GhJgProEbVh/GWtjRwvnNVULcEtqvQa/kA42ucGDL25kQumJIvsMJfQBJCNsuyaLH1O/rTIicLJwuyjbPyF9tUE3SqkJNSAbLolOXRAVEtJKxPFsN9vXm5xb+BKKS8gVKdBYiY48QSmMYxjAlmBzNiTqkHfurOd+Yw1p4ZYWMGw51Qs0UuOBYHKho7V32TAOtnz2BlANsyZ6RyHhqCefufGwdI1hGP1qrtYP6wfcuoF79doNaAOy2Tm8LZ5e65aj5VlT0aEXDFUkIhASske61C7Q7O0qmZToBVbOLCodzIrTa/npYCDoypUFEDjtKFDEEh7mui0xLCBOrTkZgOQosFStWwGs5imlIXyyntslINwEfgSttR2aze7xpKrteUlAUp1agYoOaDu90ANBliJI53Bt7eJ+QCSEcwpysEXehaFuH1E2wgD06dnb1dPNaIgPPYumXP0D3Gy+chm6nOxGB46m4tpqg6Pupbw6R4VkOSiTUokEiZEEZI1vUwsFHRlfaLgA1Qv3vtxgXYUk5CGiIoS2UHwKWGbD7+tBW/Zdf/YSJ+AjA4t+RsNJBTWZFmzY6kmKjx4NR9DFlAtFYgLrLYsSZK6Yi6QBeWAfjTwsAC21QX9As+kYynWmgdCOa5brZC7GhoTYmxpgJoJ/hSD6vT3q3zlEv6Iq0P8ce/zAdE8uRYHtFmHzASQigU9PgXGfWt8l66EKDIqltmp8dQJTn5lETTutIJe5MGxDGDyXdoOiDEquvCCSqrNsR2OYFQKj4ofF1agLFPcBJCD7kAYO6F+InWWKXNl4pl4+4XCLNpZICViU2jgM+rbebvUwGbSEiBpsBJCHF2cwgfwYiAoRBGRt5e0mFVYyLkwRBD9XG/ebUilTTcifKVAWRkkCrN1GGJIi5sKWqKb7rgm15eynhxSNBF+fSGHRmG/5MXcNEm2g53LWCZnd8Kbk7AMEq9qbO8t+jW9gNiCaaA97SqCDUA0El0guo4q2+2b1R/GMEd//R4hwNQfaetcMd3QNomzplxciJPIcVGNMlKHuv4b/IDTO3yZkXItjRNDabYgY2NND9iCh+6DOxGqQeh+k6eGPZCMdfJ96tuMBcEfXhIkDAeUBsLd3elynngXTtOGrltxtsakf2gJIR3GYLTRyz8InOF1SWi/vs+Nhknx7IJMvNYnE4OmK6UxjG2NdfmpsjbLxvE+Z4J917e3kuRdDZ7e8dHfZ09IU677vFhkbcyiCnB5pelThuYr9vtS3j/g9Lxeu3bu08eWx/C1GIvfXo3c+1jdl+0Hi583efZ1rBUbb6HvXcLWa8NODOUPJDcUtBVXiVKMQYCNWhSdPqQWdu6pOPXZY8ztPGu4bONPJ4zlaR5VQVGTy5sFpycvlW3Oa56nxcy4eK50HPm4VY5W8XUWFW20pVVVURSTIL5iQjacSo+YeR5bgPpvfWxxeClvJQ26RNJ2ujmOToK6G4dXrsBnP1CkI3B7Cx63ZTxi9dNtzki/za5DCGoEDUP2QQMgiT80X1CAoAkhHUWDejQi5pEw+/4xgTVdYU2hXaGMxE33WjtyU14nhd4eeVTo6NBb1owN0EC3yjgRC23d1rR78NNxBlmjGkS4GBU1MmZrOTA+WsAbEQHCgeosvkaVmS0cKZpRESn9gCSEbV58sz2M2b74Pl6ZJHCi01jOb3hKIL4UNpA0mAyOLGmw237pdFbekwMOGJL0L64qGaOYGYDFdpeg6bR+FoTeIB8xkrJg2aeWW98TBc93ldgK+8KgnpaPrLeDtFRGMVURGIIyZxfXncOVPuwKxPj8eA6QOfY9ibQRxf/1YJjY7vxASQh4MCcxMLzHXrut6s5ztX3nu84dALDyh55xRgekpDQCkpFaTJesZNpCKIDrskyzJEviKziBrOzp6X0DDaMDYvDZbgmI1ZIbAbAL2KbQwhqNWLQsMUnUfDFiK4nvy1qM4Comwuv0sxM3KAk6gqDbcvSbzjwOi3BtizBWnrBH3goO+y6L7NrPypZ8NY6FwhS2PVpEp8TVoGxIiUJEw6bP1HFeNlisRJmgXx8fDd08aX+dGgUFxdumqhah21D3eoVQ0aRaCKIw6Mg7M62pHJk/gtps1LZ/yHOYEE1aeWOaKUCATtkBTZnHJiRMhAzrhXW52ZUo5ONOk0X0Stm4aH93Zyx2N++gCSEZI7e3eVYsdJCRO257rZtc2xhk7LrOAi5GIFF/N0Y3WGSAP2PdPKpj/X4Ai1tL/V8LgbbtaVk2rU1xyO3Lrc2x0FylG6XamTWYtxxdzcLzW2g6W22sakgWVyaoIIIOjCAmE0VzUq1FAZjgDhG0pXYoVIxkRWFMYiObJSiMQZTLw5JEYPgxZwFoIMp0BTFhEyA0njdgnDg2ZjWoIxQYsUqMMJhRC0rLNbaSosTFCSIJCuEaFr+yr2SzMkouCCNqcRUwKEFKKReJa32ND1t8jh35i9Hoxk9ZqbCIleYsvfWPJe7rMvJuc5nVccLCnA4U5rkO16tavO3kzDc4UDYn5D0el8dHJ4fF5n1uTsZ307IaGpRtvKtG0K2lQwOKSbCxURpEBkPT1OY6qmqC00cyIlBLqOCOgi2CHTOK1asYUTngPPxZZ133ZRhUoMqDhuxA5IacClkB8gD8GX6oDPtAJ3WtO5JBlktHsGSTwS5bKypBSMYgyRp4i2ip1Yn5i1EzoD4XrMY6ZQkl3O/nnXUGGHlSU5lEyqg95sqigh4JgDAD
Zai0Rd5e9UwexYcrceXJny0qp6xLnOwwwyk2VoYsUaHsD+lVDcEgIDHYEMFc0087fwtJlG3URVEeOUR6nP9pVRYGDD7LqX0rWJs4QoTPJm+2y8Z7aUZtVkwkEKvpUBvKRjYb1AFlEKGui3m2TcEVZZdJSAYrkEPwkZiIpIcQqOxkYY3rCoSFolZr3P0OegdA4tGMZAQqruZ7crvWO6+t9iJhjfPvQEgNFwCpOh+q6/1TxvSLcITGgG1hw036LHcbo+Y52mwWdgNiYIkoQDTeREZRHACANhddylW82Rf9oCSEY3mG6y3oTJ0vyycDf4StapVXsKnuQNsUwXA6xtvA28ljykrHSjBWWd6ZOJouCTYcHe7Wr2RF0pTYsMHQq0yhNyA6bwmgtAd33zk6EOmcs6N1InpT5ik7mrcAWkDrMb65e82BVgGhny5KkQaZUJ32HFcEfRhDz5sH1Y5BS7AEWpMDBjVQEkImgOLXUW2YBmonsocvK3h1G/gHBpjQWINJmzfuPfzj0e3Gvsumt5d65Ogj5JyGBh5SFO7yOziiaWDtQ9+RmNZMRCB7okPmka2JyACeKxwXAuVvFB8gEkIwqHooAUz2LlqShTPMRGsH+Y/zg6yzXVh1Axe54Ow9FUNe205dyicpSRbatlOyyQAdgWAU6h07Y3j18d9vEZfcV6UYcjKP2MG0QFpetOi3Xs7NmgCSEOc9MfS8Y6k3vt73TEo5Zzw8ScsWE16/YAkhGVamhYLlMtuUJTFjYEtTmfTb+4BJCMrRfBpMOrtO6OHOSknKFZSnXCTwkMFnW5DQGFZ70+y6n3AJIQ/U7ttFU2I9PSiZhdWGa/aYn7wEkIhSRwMGA+ucUCV4b1kDABEe8r0HKfAKU3enyR3HdGUe9Pf/R+B+N/fXEkS4YMJDbgD/4u5IpwoSCoi0KyA')) +with open("ncode.py", 'w') as f: + f.write(s.decode()) + + diff --git a/tutorial/tutorial.token b/tutorial/tutorial.token index d42145ce685cf8f2964af72db281400fdd0a27f6..76e7958ee03b26b07844477a0d7b24b3ada70015 100644 --- a/tutorial/tutorial.token +++ b/tutorial/tutorial.token @@ -1 +1 @@ -CmRDPnappNq1lzevX8ccrBG3+MCfWGIOIQ8r4UiENWoS6G1RW3XcM93f/PqW6F4XJZvvbv2gtlffClTx5mzUjBV0W9LGXcRdhIlU8dwwU4oSt/h07LfkJ2aq9CwQB/7EDEX5+ibovpmI0Orm9HSJWCedRTdBXDkMVXSK09GbQcoGfvBLXbrmKwPxv20bS6aFFo6h5zGJyDb9GVDdlVuVAwrpNGGVaEnHXAwDMeuLGskNbjUdDsMAo+I6nxg0PlpQDSnFR8VU7w2RoGIE6Z7UvB2cZ5gU2jA3KlmR1Ej0iGskWubyFxeMPTnnxV1Jjts0Cal3Jw22is2D9vb19GfeRwq72eUKtc0eyQKE14C3i1IidlO0NkMIKviidUDY29WCEf364PKaemTjxYPOWyVJ3ALa8kkuadL3NYAEtiyYQTcKt6JpUKmyq9Y2TFeyu4KpBXnZpop6KNRNgqDqcf5y4ghYMqrgkXgKPtP68m4V39MnesYrxpVJXfo3W3+zrMk5HJU3ZL6+Pe70AOOH5fYishO8PHAdIf2z424tuXuZEcsBKd2ieHjow30WFg1hcuirACtWNcBaPhVUNOVD6gz5aRkB7vtCjrLX2YKfErQ4w/QOVW10oZAcPsjKf0GsAHtmG6nQIpWpFE4SyjFtKJTuLCKUvTUgJRTshcQiT0cT26UXFMXcAdGLlhyTNiHAo8VCCViWVqOiYxmHvFMtZOwaSBLMvsu8FGOI48s8I+lzAVUI5jX/ouuWBTEpqHA3FznLJkoi2N+YAgmY7AiP6rT83CJHKl9JD0y5O4UIBw3SPO4Ww/ZuniksmThRT8IPaSvRCGLVO9paYaaK6hD/P3qWdA//S7EVwRdDvlZJReiWzWUiV49ywaS9X4MXwXSKClmtJnRcsb2e5AqT7ZC790/U1A5Ektr76s0+Tc1m4enSfQoQtaUluX5Apeyg0Jly+dfADyYd1ci9/aeJaJ5egVf07guwYln+55Z900IEHJZK6NAFZlOgIjf6gFF7zrR72ryRAVg2RpiNAnn6cIxVeSSEBgnZ2/kKe/bgm2Fi619t7roRczt53kp/4t4kY8O1aXIvF5IBIsJ2ngfhmj5SHI0D6gM0qTFCyC79NcDAwvaM95oBCOVkyYzhB/uwyBuBuWSlDKnicLl/BKsO2+/MQxwzIxCyFDDyyUAAjgGTYJDzDgwCqsQoH17CCbGB6dAu/Tj1FPoojlh8gXqxtv1DfkPs4huF1QN86hRjK4rz2SLl5tMPizDMucOuZdL9Z3wNscBCEoj3t+LL9yWc6oJkIIKgQBOYOkniLBr7t1DzEwcRiVYn6ocerxtEuIY1ozJz5UbJE8xJeLfOYkESM7C+krVYHSJdn83I3gHVZiZo1yw2e0IE1rdEjNPSvScdCz1six4HIpXtz5xb+jaJnQIUN6+cuQq3omlQqbKr1jZMV7K7gqkZoHPigBiqTISWIzKm4VBWD+dRk0iFc9HxvU+75U9E0CExfsMGFk+08Ub5sU58LzsIaaSHOKrQ+TJCHOWIMpDsDJ5aui6sij4BNDT16DRmIgg1wS+6mpUJdy1AsNbc91MIY8rlyLcak4rYqeD81n0dEaztyJBWK9AmvbEbL+PTmRWjg5a6abDjwx4fahTmivofoLa6/xIBAOtKLDbzQUxKI0inFdcYVFV3jF2SBHvRKSm9qSjk/NjKjbHJFu02LVgSdyHUkt8y9BrQTS95HjxQHNo9l+EbkPGjveAm9if/aAhEYm5iiYI7/d0YCSARz8Yf/kvdsxi1QM31U896Lw1qHz/Mfo2fyZ7pf53GBq4XKSnSnd9oHilkS/8sMV1ZK4gY7GIf/WgB9pJu7KE0oDiXIwS0ybpOg/rURzt218+jmii8imv1fHE58rQumoJ4yz8bar+7KiD/UjfH0ZQAxSgSE3dVwA1VKV4RnWZ1wr19TSWafYfoDsaWMGbe5no89ZkZTmXiHam9pnC83v9A/J/IEvcQbKwK9EQJG8W/Rv3inxyaUWFSSMVVn9tl7L1bYB4T7CJYEBbMOcLsDiKA4mCRGA7I4m6QGl92RyI+/sFChQymGCsauZeZuml86NnQZo4CoaUSHEyocgvNMhwPkUKQA2iYwRKgP6Tj2/zf8cIo4w/ILU7JBFhKe/R3o0LA5HANyjRluR2TBszIFsaAGMjjDecI8T89tmxoTJQnv1ROGBsrEmMaTf1HKagZ84gg6zYeTkFTUkcNXZdax6y8aevyJ23PI42TFuw2j041HpdTkgKqDNTjGUMUPZ5RaqUJB4wEs5o/LTtrQQVuCwakPPXVHCfQOaUmOZeTc4oVmn2ytiK4
xQq1NMUHxujDGxEPzQcLiy8VkRiT3mtVgo4CNbR5BwzTraEuxbJgz1ZYuy5I5QilBxMiymBDZ9FvnIIg154P1cUJrmXEhouVnCJBSLo7H7mKWvJh2ERKwZqa3v3JGiReE7lB595t8QwzfJkRWysBWDZGmI0CefpwjFV5JIQGDNU2aYY405NngaXKKfOEtSfqhx6vG0S4hjWjMnPlRskfWYLyDoZXSybofAydlV9KDuLsA2ipbEbxAduP3C6iZwWNxJlW7QHZ4ZvIhfSAad8oOfPINS+D9K6zxoXkt97RElPxHTCzBkW+Y/KQwCOUgijOYx5PcCwAAd0QwUsmhXYErtB8vch3c8MAFxcLhQhRFeh6MySbFpav96gKwIYQlw1dCRkB2tsqzggcEVMnxMYD1mFIpCTp8aPQqc6v51dIAce9Mj4he9RG6/QPdjchMBTUtJkdC1bR35MThqzrMOYkAfNuuBPU20SqMyyRq3QmCwJs57vPKDTG4/eHtLtL/STB0dyphefoGetBqtUNeiYaTlh0oc7uJOjOmPwNULBdDn6QmmTvjjcGOEyz9EBgsx15MQAnR0BX8GXncr0GqV4hcGeQI43SC6l9t0338ZIuGojCjzHlTUm/0iy9hRqsLhf625IpwNoqyYDg7FjSJakF5vEyLd7Sa0u0Keb00gIZHSBNVCoUzBwU105pa0zf2hQuhaUNIG7TAwSlz9OEb8MkTNM4hQWmSr1qxMSnZ4kODFSw/1Q8Ho7r86YBPE/m3wdC6iRMkKsIqKzfNF+SYzAC9FiiwURqSIjTpc3HtY9ID7/MqCIF39/8sG9/vkc+tA9pfDJO1XD5jzvbh/iJq/kWaZPvT32J2z5wB1pDpajrBiJXGOfE5V4F99z1ZRV3eySYAzuMWDeflOx2+fC60lUEoz2Cn0KJQ5smB2EAXShnGaWvMEdeNZKK1rekuQQUPBKSULPanD/bVTRlRJKIO/oHN1d/eNYYIj2XvVgPZZ4GGglySuwXoNkdTewirdXxxiGfVNMr3kJjpQzOOR9dfKMPkyxjC4RSOBdn/xphFFzBFXyxYmj7JWrdBDpYG6RbvB0xB6B8aUgCxjRyhzuEq6Qd9ompeFRG3Vbu7+7+PzW3JF7++L5D2G8YwQGs4778Px2eC9dzdY2KJc49haT0BLcIzbGhr4zTPPkySNqRP8prBhDbtrclpbT9/C5+2W9ouB39/YGy0YsfXl2jg76An00T4UmZEU6dG2hTZMf3MN7vHI7UySa82mnVZecgSW5+xiUK3Uprf+bAL1jsAGHoJLkU1IVlSJX3lEetfefqA50SKCT+8DtZRF6FpOuU6H433CVpcyUTGQF6yIjlFQGIdYIVHpvZ0KxYiO3WhNcKUpi8CT82K/2noYZ7m2bcwnj6bgHHvTI+IXvURuv0D3Y3ITABx70yPiF71Ebr9A92NyEwAce9Mj4he9RG6/QPdjchMAHHvTI+IXvURuv0D3Y3ITAKDTs68/b9d0qnINVrkbTtKHGGctfOPlj6xruhcftNQSWsVJfNE269WTv6uheFe2sRdISoTDx+brE5hyjRRhXGDOiW2TShSYR7EwhW9FZdPwcNRZshbJXebCVdf6TjZTopElHxG375tkh7ABDanHW5DX0xLJntny3gQIOPAU3oUAGbupfPhkaedbZSkgrz6AUW9TwOcs5RMsxiY+xEkVHkCF24mBIv/YzLftr8HicwoQ/j/5ZtyVokKbo4jOOkt7smtl49tKTWFKAtxo97HEFzIeMXjn90z7aa6VPftJYU9gMbhkp2WF9BGDN3SfUpOlwhBrp9NFUbDxq1KobAvGIPKdZ/MnU6mt14+UNIVSnJPQol6cr6jbNFFOSlG5kAftomHrMx0qQm7r1LNVNikDCTJh3hOZG5COffXwq7xo2BbQEmBmKMAvsLdZ6JyjZFl44FWsvSC7SsbzAdGXUaxNggBse5iV4J8uhc7w/GrHTwQwUFGHkxwAylo2UIdm/zwLoW5RsoMKXC/7E/y415lgrmJJ96lL+MtrhNAyZo4T51DwHHvTI+IXvURuv0D3Y3ITABx70yPiF71Ebr9A92NyEwAce9Mj4he9RG6/QPdjchMAHHvTI+IXvURuv0D3Y3ITABx70yPiF71Ebr9A92NyEwJeV92zbp/3mKgQ4q/UgZQCTPMzuM2g3m6AegJm5UET0GpVVnrQBj9O87rRqPvkPOIdDkJmdcnbUka8fqOzP/vhYHb7eomNPpHUuwDGpT6kgGG+FKjCmFg3RgZvTYq1ICC1S+eWA73qdQMh8/zRCk6xIqmg2I+l5yxO2E6p2ZtOgUg9Q9LiSTbOPxbI21f8L+GGp53Xr85D9alaTszXD+KRbZnA8KBUgw88+icMw4Swkmbzm368+/f6eq/pliGPc4D90gFzwG3jHzEyIebmZpVBe2TlCKd9lDLMlgZij3f/MFFg5YTvGF7hKrUjvHQxPxCRQLuy4+tyk0e3rgGSBLPgt3ebMuSeG2KFG11+d/P60ns3nugjij3kfZilBA+4dLKCCYMUOJ5czZSn9BXTohjhpuyRcT+DGrqCfW5YyT+78KnhiTwJsU4vZlHh/vB2h5DIMkz4w52e4e/thvxjg7LiYgA/sLtLyw+w0MFVyK8YoakBbiTs0mktxkuk19V4vXB8cVEYpq2pyh70tpE8tLnhdQyLNhME84yk9S7n7bUyIAodehuIObiCw6FFXpZZbJBE15FPDs+c7vPk0P7mpwTxC3+8ofvq5S6SzSk44FI6UOew62K/QRlM6/MobYJzv8GKxsdqHyvHvW4pzcJMCnjgqx9HFEYH2XBtEbyyeAk/sDhUhkZZe3qoVA5u6KSI9zB5nZwZRcpYOWENS3wDXcMCLuePPIeFrHxmINMQiJOpoOTiF3ADS2EfZAfsO1plj9CUqezlaCpBFuHz0uaO9NLBVDJwZ/38aDVUf5IAy6sbMC16GEo2Sv9M4a2mkIB5YoKk9idctZ3S+yA8vxebmHtSaLbM+dNGNPYyFfY6PZ22AOHTX7uJY2CC2awGLEnY9/EIs3Pf8hpNlJc99hWAbokChM2plEvfT4vNTOdmLefTUf8mcwgR/J5HXOYrDrWgv8I6Vj10gqz3WBuLgh49332wJ0xdBOgZJECCbqmlRatygSV+2iKMygKYzgHfZMXgkbJKp8E+bq9BNDe9ts88E45ibhTqM/wwKrXEDwgPL0H3QkXeINT5g0v2+Uk9VApFCmDyridXgRkNjWLTLtoXVxBydgnIoIA5k99CA3XRPbR/wRmuQ1jykj1flctSRdh1TTBvp8QiMAiQNCJ9HUKjSr8wLq+J7VrzJH5oRlY6UuA5smwxeVEzrerub4+0eWy+AoEtUQ12Yt/p3kbgov7NkrkQp09lgP1gMOpQMg1PXId58eN1KLs/o6il4p/BMoi0bzHMiF1GjoBXy6hDL/0nGD2CLdbcSpw22qK+VnOdNXAtko3J4tDXEN8Zy4YrbbXEAAF+ikWGEACRtv1zJb9akYYhWBIQ0ZeLoCKDnn8agq3IcbWLHZDPjg7kqe6UX
gesN9Ej2n3oH+aIw7d9fwXk3maBN1YexT7oMvkVLkmPve70UDlZRAu3UPOhcHKAaQQ2FwEapQnTkRYQau5FlxOgah+SjU/34rda06vDLXS2twxlIJaBbSdyuxvlPTBmJKzPSbELezZ5TBKHBpfJtlRjQQYAqHaY2lSLcVqGMDYVaUNrYC5OTyXBrrW4tWchd58Aj6IhiTLxmEc0MixozWwXY/3RJdJLmN0CQCyH16Mwgi6KIWj4fW9owDhB904MUqdZMWF3DdO2jxmtnr/SdYKkdSXwvIFqtFjEi+R0krMSn/E6wZgJ1tJfwyo1rSvFLOXl7gFmCgSyWJu32T7+GFrwoguAM14fPJjsB6EgEQCZyNAe0k8EIhi7nQ7jmD4x2YwupKJV7mhuGSA8Qsn/syOJr3egcwSuEJ4aehW5f97wvEVaMExpxwRDF2IHxCuZS/XTxFGkxW6S/42b87mmkAS4GGMh7VxBdMwBUXtPe/ULqXV6sK7kAo5vJV8X/ikOZzHBGlHUuHsw1mVIk8c9RHqdO69QoyU2C2e/MPtxCfFTOUAxsUrj0jnVyNydwoJ4Smt71UIJ+1zx6JwnwPObUH4xDnnyCyqDC1iHIrVweGdMrPKEkZi+rJrWIFtSmut4ZbdGjnEwlm5IDn3NW7I/VPSpWbpQMvDfaBzfDf8fqGc3iMrUka0iske6U1khYE4N/WfmbUHpOClNk+m2K/P9pk4dXGHCApIx8XEW6LdO/DaGtKh98MFeueNmXDIhSLUFjQ8zMSBfgS5KlN3Sm7ZF3RV5AzeCKt+YgiSZP67yfcVRGpy0EdDmSroFtxb1w8+R1M+5PaDXLE4aptUGOW4cMU2RKBgAYs8UAJGHGK7SYK7uvOc2UWqN/cRjrE1hZlUHUEb9R/A3rS8IjkRGVayIgijrDJhAJFGoy1dBZlAgRBLrwF0bkB70HD7QhgwOGwoqwVd2zDD1LCd/IofT+bac7wnQmS7QxMCgUkUNe7dypdF/zsvJAFxpJmHFu84+H5e4Z1FtHhIIyu7GsS2xEuESHFZ84DTBF88d4pCs1fCnHu1sM77cQPjDavOOVrNCzNgPF0SFy3DDxemgEUsDACAPheRVTmcSIiYXUppU6lP/li9wvDhi8Ake7FtuSPq0nDkq/a1qwbGzk2GL0b/R9ggur8QlS8SSX7RH0LdQ6uynDnuAC20kgBInaxgI2sGq29QBP1ANugKXethmocw2OhGZ18ab6aRA2LgQcBOhnfwV1z3xTphjgTUfKFDAIi0rXTrLrtevJZEEJtIZGkKD3c7ItHAMEWGAluS3E+UxRvUajTZp8kd4AnNo0TlvaTd4NgzIDIxNoZCALjYYLoWIFTDY3Y7264WBq4Gs0ny9WtlUbBm3k8EL0FqgjgxuL4nXMEASj3ujUCELhgNK63iqmREEWcjl1Xvgc5KlUYzQfybUXYRE/a8FYcMcHlRd6YVNCpbEu4as3rC6pnp+gSJJaKwqtcgFFbqB9mcReFvh3bhleBCVsBknUQ5Ggp1qVybfaiZnARL/+LC+HO64YHsIVMeOWBirN+SiVg8xATYp+iZCkgnbNfwSUp08gEznFcZWRUwq4D9804AbyiLzHn4ZiOXTuUi2JznhGEWBCfYPUZB5lghtrCvDAO2aZeEl+Rkjca3WHV3U9BCVyRjXpEOha/nZ/DstS5YCavDsGccoVvGQAvRvrVujwfG+U8ETDCbToqVAyF+KtlGku+e62BIl6tay7F+Ro53CRLc1xXxC6XCXAnV8wCxagnZDJH488hLOjXkFgJYE1eFXh4+S1caBdMBrNT7/UK9gcji6qG+Krdytw1FTBUWn4hlwfGgB1M21IavV+Y3qvdFRlQ7BIdcGo+DZVfDlhHQA0TKDnIOKiGSL72IcP8iMEZ8qchGSym2y+UGVRKjP2TFB7gNBqcHDJ/QZHA5Q4lQQkA7s7/veV0nGOV/TG4+vQTGWzCGaFFPSZw5p8HjC6XHNlGZgEhHx3C8CsCZ/d7WgES+CZboNUrg3Cvc9ZEB5wSno58wHTk48r7C1hTk79xCGdBnH7eSc6C5w8MVY1ZMh7abi1L6A7g4y6MMIYyoh4l127r7zEl3N9Ig0o2007xC+kTxRSE6kH4ZdeS+cJb0R2Az0NduhAAbFwzAjiSSxEg+9S/w3GTLHRzSbMjvztOJTK9aHGAN0gHSz6/GWnolQckpIgpxC2mfFFWwOOJp3IR9agQKwqvatJl7S5n+MrjJe/MPtDTcBOZjxrsYoksrw4piXfH747ibgzBLxQTVAwnjSCxvVQeF0xrF6upb7X1Ag9XIIL3D98s9GZDkLdKlxZm7WD5anljQV8tu71qVx8oTUpFllQFdH1R6jbDsx0XKJmz2nT1dtiHKtKWeS7kzyJGOLyKpebzB8H6/BubrHAJLGL+GpSiY7IEx7GBwU9IIQ3NOG0Uaqw5xemYPhumpwXC84AVhQVWuczm6JENvdACMORhxIqsbY8xjPx8SdioD4RpJkwfbpIfkbW8LK0vXiY72o/kqdLlX2KHFrw9ASwZlG2a4fu0uZRUiwJga8c3IdUymsdaBxRLfNMZrzX1Mgmz7B5wgAhKQNISotGQbTsMKmdkru91gR5sGvcS4tnKGAqVDGxKQca56rLXoBNa8QQt7jWo588TzDJr9maC8+QZk323IFXtWAH2touPuQjMCEQ+axAN47ND3khOaM5twQOHTFRkwJTzuRt80tuoFNYOloJAB6SI5dh1BEQA+x1FAtLZ+oFJhjRCtrSeHQzEWiLQ+fMvEMB2SSDpN/8tIiMBo1XWikzxM9j0tqd7q9M1G/oNiLDQdbIfr5lvn9xcLilI/+QATyxFrMVvAMJpTm0iLlIqLC2UimAQRsa2GgWaIyB/b+MHIRBVCw7OIHAxpxXz33uC5sB5tj6EwNuAZr4UnbuSBdyNo8hr2gcIOyQjJ6HUhYxvGLufBPuxC/LeiiYLmYCHGX7YUfiHvXAeTZ8QMH2K4sxoU7qPGV36DntDHd2WsqXiYBNP0Mqli0lL5RDAMi0HSZLRUKBTpatHqHwb7lHt+akEF6FfOYMLViyFHCGMn/LCY+XcQlhZEOxs+A5Dfz3T28lo7QknlWWmZ2Qf2CEEOOPU/hL7qChvI8myBZWEsvE/frvDR3k0xcm/eAh0MxuU2iwvenaHbN/pFZ4CRAPEwioYoMAgjXZ+L2v7EM40a20d/HMzHgefhsiXlSKUIk17uk1tbezW8AZp+lYCPyt40k1d+hAgBCdyCJCUDmL29WtDSuR00jh6w16+MSRXtMje//yWeksrUMRPn1wJqvTNgQ59HGUq5MQF7HgNDEZx7PKkXHHuMdTjj5PQOSjse2Ovmf6PKX1wQ+U35tol2mp2tL70rmoOsMmtZk1sIzYtA33g8JhBlQrZM9gikhZQFa8Vk56Qd/o/Qz/Dv5wIz8otrkFab/hcrC5vgdAsEHvqtbPjzvPyRq2zD1a6uhZyLa6nHViID7h9PMyNjC4gtzx24PCfUQp+pQEcn8nDH84SzTdj0XtXjFFvj1bHlCdls40kJ+
diff --git a/unitgrade_private2/__pycache__/__init__.cpython-36.pyc b/unitgrade_private2/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 55b1e08fb7a2e153288f98c9683097b615a31d04..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/__init__.cpython-38.pyc b/unitgrade_private2/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index 6e9fc2e8e234b8d82d001a785d86af68821340b7..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/__init__.cpython-39.pyc b/unitgrade_private2/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 7bf2f7227e54c7b2301a6dc324a1119b37119103..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/deployment.cpython-38.pyc b/unitgrade_private2/__pycache__/deployment.cpython-38.pyc deleted file mode 100644 index e5ef1a82ffe3b183639daf4d1197711d94e8aead..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/deployment.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/deployment.cpython-39.pyc b/unitgrade_private2/__pycache__/deployment.cpython-39.pyc deleted file mode 100644 index a1183907f15d5618aa6e7584380d3fad693ad2d2..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/deployment.cpython-39.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/docker_helpers.cpython-38.pyc b/unitgrade_private2/__pycache__/docker_helpers.cpython-38.pyc deleted file mode 100644 index 64a1893f40852651e6f4f6145d3b8ddb4b5e6dcb..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/docker_helpers.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc b/unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc deleted file mode 100644 index
d8bd23d3e204c918fa1d3cc8d5f223591f47ac4f..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/docker_helpers.cpython-39.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_create_files.cpython-36.pyc b/unitgrade_private2/__pycache__/hidden_create_files.cpython-36.pyc deleted file mode 100644 index c10d3e26f4209a3cb1de8b9a8cbcd399dba53b33..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_create_files.cpython-36.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_create_files.cpython-38.pyc b/unitgrade_private2/__pycache__/hidden_create_files.cpython-38.pyc deleted file mode 100644 index 495660ae33400dda27e6cf64c7f939036a688a98..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_create_files.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc b/unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc deleted file mode 100644 index e9bdd98457f31c4fc351bf81ea01a6d12fb3d531..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_create_files.cpython-39.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-36.pyc b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-36.pyc deleted file mode 100644 index 7aa51f153e94d74ee8abd09983ea96ac0bc500f0..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-36.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-38.pyc b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-38.pyc deleted file mode 100644 index 07472b0de4bf0b0f74be03f08a47140647e9cb27..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc b/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc deleted file mode 100644 index 5c2e1ae6b68171c81e7ddf38fb9d45d3f1968a79..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/hidden_gather_upload.cpython-39.pyc and /dev/null differ diff --git a/unitgrade_private2/__pycache__/token_loader.cpython-38.pyc b/unitgrade_private2/__pycache__/token_loader.cpython-38.pyc deleted file mode 100644 index 31836c22ea1ea42dda4ca41936fce6eb9ec9f4b6..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/__pycache__/token_loader.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/codejudge_example/__pycache__/__init__.cpython-38.pyc b/unitgrade_private2/codejudge_example/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index ae23c7f18121d96a18d55cdeddffe576411baec2..0000000000000000000000000000000000000000 Binary files a/unitgrade_private2/codejudge_example/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/unitgrade_private2/codejudge_example/codejudge_sum.py b/unitgrade_private2/codejudge_example/codejudge_sum.py deleted file mode 100644 index 8a43e38b2f3ccb5f88aa86378a88332c90498043..0000000000000000000000000000000000000000 --- a/unitgrade_private2/codejudge_example/codejudge_sum.py +++ /dev/null @@ -1,35 +0,0 @@ -# Implement https://www.codejudge.net/docs/quickstartfiles#the-problem -from unitgrade.unitgrade import QuestionGroup, Report, 
QPrintItem
-from unitgrade.unitgrade_helpers import evaluate_report_student
-from cs101courseware_example import homework1
-import random
-
-class SumItem(QPrintItem):
-    ls = []
-    def __init__(self, question, *args, **kwargs):
-        super().__init__(question, *args, **kwargs)
-
-    def compute_answer_print(self):
-        random.seed(42)
-
-        from unitgrade_private.codejudge_example.sumfac import sumlist
-        return sumlist(self.ls)
-class SumQuestion(QuestionGroup):
-    title = "Sum of two integers"
-    def __init__(self):
-        pass
-
-    class FactorialQuestion(QPrintItem):
-        n = 3
-        def compute_answer_print(self):
-            from unitgrade.unitgrade_private.codejudge_sum import factorial
-            return factorial(self.n)
-class Report1(Report):
-    title = "CS 101 Report 1"
-    questions = [(ListReversalQuestion, 5), (LinearRegressionQuestion, 13)]
-    pack_imports = [homework1] # Include this file in .token file
-if __name__ == "__main__":
-    evaluate_report_student(Report1())
diff --git a/unitgrade_private2/codejudge_example/sumfac.py b/unitgrade_private2/codejudge_example/sumfac.py deleted file mode 100644 index f429a12c93fa7709d5008532d17209b46a9d55ff..0000000000000000000000000000000000000000 --- a/unitgrade_private2/codejudge_example/sumfac.py +++ /dev/null @@ -1,6 +0,0 @@
-def sumlist( ls ):
-    return sum(ls)
-
-if __name__ == "__main__":
-    sumlist([1, 4, 4])
-