diff --git a/examples/example_docker/instructor/cs103/homework1.py b/examples/example_docker/instructor/cs103/homework1.py
index 381bf362e46072db6d70d1a46a0af430d08d3e2b..286b79fbac40c2d02b5874c0a73fc387835ce2b3 100644
--- a/examples/example_docker/instructor/cs103/homework1.py
+++ b/examples/example_docker/instructor/cs103/homework1.py
@@ -1,26 +1,15 @@
-####################
-# Question 1. Write a function reverse_list which accepts a list, and returns a new list
-# with the same elements but in opposite order.
-####################
-def reverse_list(mylist):
-    # TODO: Your solution here
-    result = []
-    for k in mylist:
-        result = [k] + result
+def reverse_list(mylist): #!f
+    """
+    Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
+    reverse_list([1,2,3]) should return [3,2,1] (as a list).
+    """
+    return list(reversed(mylist))
 
-    return result
-
-def simple_list_question():
-    print("The reverse list function can reverse a list")
-    l = [1, 2, 3, 4]
-    print("List was:", l, "reversed version", reverse_list(l))
-
-def add(a,b):
+def add(a,b): #!f
+    """ Given two numbers `a` and `b` this function should simply return their sum:
+    > add(a,b) = a+b """
     return a+b
 
-def my_sum(ls):
-    return sum(ls)
-
 if __name__ == "__main__":
     # Problem 1: Write a function which add two numbers
     print(f"Your result of 2 + 2 = {add(2,2)}")
diff --git a/examples/example_docker/students/cs103/homework1.py b/examples/example_docker/students/cs103/homework1.py
index 4b1b3d0630146d06b4eb8aa5fa4e85383a67a287..8da29bcba98539d428fcd38f1f74f0ca5c3b7336 100644
--- a/examples/example_docker/students/cs103/homework1.py
+++ b/examples/example_docker/students/cs103/homework1.py
@@ -1,11 +1,7 @@
 """ Example student code. This file is automatically generated from the files in the instructor-directory """
-####################
-# Question 1. Write a function reverse_list which accepts a list, and returns a new list
-# with the same elements but in opposite order.
-####################
-def reverse_list(mylist):
+def reverse_list(mylist): #!f
     # TODO: Your solution here
     result = []
     for k in mylist:
@@ -13,17 +9,9 @@ def reverse_list(mylist):
 
     return result
 
-def simple_list_question():
-    print("The reverse list function can reverse a list")
-    l = [1, 2, 3, 4]
-    print("List was:", l, "reversed version", reverse_list(l))
-
-def add(a,b):
+def add(a,b): #!f
     return a+b
 
-def my_sum(ls):
-    return sum(ls)
-
 if __name__ == "__main__":
     # Problem 1: Write a function which add two numbers
     print(f"Your result of 2 + 2 = {add(2,2)}")
diff --git a/examples/example_framework/instructor/cs102/homework1.py b/examples/example_framework/instructor/cs102/homework1.py
index 91b0091bed1ee028baf253ed64bcbea1b5c39a44..286b79fbac40c2d02b5874c0a73fc387835ce2b3 100644
--- a/examples/example_framework/instructor/cs102/homework1.py
+++ b/examples/example_framework/instructor/cs102/homework1.py
@@ -1,12 +1,13 @@
-
 def reverse_list(mylist): #!f
-    # TODO: Your solution here
-    result = []
-    for k in mylist:
-        result = [k] + result
-    return result
+    """
+    Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
+    reverse_list([1,2,3]) should return [3,2,1] (as a list). 
+ """ + return list(reversed(mylist)) def add(a,b): #!f + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ return a+b if __name__ == "__main__": diff --git a/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-38.pyc b/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-38.pyc index 06533b81f4a38e074943a390119e27b775bcdb6d..d71c0905a85dfe88c5fe08fe3973d3de7ff67421 100644 Binary files a/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-38.pyc and b/examples/example_simplest/instructor/cs101/__pycache__/report1.cpython-38.pyc differ diff --git a/examples/example_simplest/instructor/cs101/deploy.py b/examples/example_simplest/instructor/cs101/deploy.py index a6e71d26120baa4bfa119fc7a71b711f381ae620..8906532315f44a5077a5d4de3313beeb1707f35c 100644 --- a/examples/example_simplest/instructor/cs101/deploy.py +++ b/examples/example_simplest/instructor/cs101/deploy.py @@ -1,6 +1,7 @@ from report1 import Report1 from unitgrade_private2.hidden_create_files import setup_grade_file_report from snipper import snip_dir +import shutil # from unitgrade_private2.hidden_gather_upload import gather_upload_to_campusnet if __name__ == "__main__": @@ -11,5 +12,8 @@ if __name__ == "__main__": snip_dir.snip_dir(source_dir="../cs101", dest_dir="../../students/cs101", clean_destination_dir=True, exclude=['*.token', 'deploy.py']) + # For my own sake, copy the homework to the other examples. + for f in ['../../../example_framework/instructor/cs102/homework1.py', '../../../example_docker/instructor/cs103/homework1.py']: + shutil.copy('homework1.py', f) diff --git a/examples/example_simplest/instructor/cs101/report1.py b/examples/example_simplest/instructor/cs101/report1.py index bf3abb2349c52eda4b761a446ecdcfa6723e5236..43d5b78d115896b59a95a9512f793ea897a7d7ba 100644 --- a/examples/example_simplest/instructor/cs101/report1.py +++ b/examples/example_simplest/instructor/cs101/report1.py @@ -1,6 +1,6 @@ from unitgrade2.unitgrade2 import Report from unitgrade2.unitgrade_helpers2 import evaluate_report_student -from homework1 import reverse_list, my_sum, add +from homework1 import reverse_list, add import unittest class Week1(unittest.TestCase): diff --git a/examples/example_simplest/instructor/cs101/report1_grade.py b/examples/example_simplest/instructor/cs101/report1_grade.py index 39676795103e78b8616518b23ee70a2aa6d4a407..463905795f87f40705eededa56fdd2437a6c2537 100644 --- a/examples/example_simplest/instructor/cs101/report1_grade.py +++ b/examples/example_simplest/instructor/cs101/report1_grade.py @@ -324,10 +324,12 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module': + if m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) + module_import = True else: top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = os.path.dirname(top_package) @@ -348,6 +350,7 @@ def gather_imports(imp): resources['zipfile'] = zip_buffer.getvalue() resources['top_package'] = top_package + resources['module_import'] = module_import return resources, top_package if f.endswith("__init__.py"): @@ -425,7 +428,7 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import 
os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not 
None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. 
Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. 
The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar"):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n self._running = False\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n show_progress_bar = True\n nL = 80\n if show_progress_bar:\n tsecs = np.round( self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n current = 1\n possible = 1\n # tsecs = 2\n ss = "PASS" if current == possible else "*** FAILED"\n if tsecs >= 0.1:\n ss += " ("+ str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # 
super().startTest(test)\n self.testsRun += 1\n # print("Starting the test...")\n show_progress_bar = True\n n = 1\n j = 1\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if show_progress_bar:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache.\n _cache2 = None # User-written cache\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n res = testMethod()\n elapsed = time.time() - t\n # if res == None:\n # res = {}\n # res[\'time\'] = elapsed\n sd = self.shortDescription()\n self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n # self._test_fun_output = res\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def unique_cache_id(self):\n k0 = self.cache_id()\n key = ()\n for i in itertools.count():\n key = k0 + (i,)\n if not self._cache2_contains(key):\n break\n return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def _cache2_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache2\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n id = self.unique_cache_id()\n if not self._cache_contains(id):\n print("Warning, framework missing key", id)\n\n self.assertEqual(first, self._cache_get(id, first), msg)\n self._cache_put(id, first)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False):\n now = datetime.now()\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n\n # Use the sequential test loader instead. 
See here:\n class SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n testcase_methods = list(testCaseClass.__dict__.keys())\n test_names.sort(key=testcase_methods.index)\n return test_names\n loader = SequentialTestLoader()\n # loader = unittest.TestLoader()\n # loader.suiteClass = MySuite\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n # print(suite)\n qtitle = q.__name__\n # qtitle = q.title if hasattr(q, "title") else q.id()\n # q.title = qtitle\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. 
Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = possible - len(res.errors)\n\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = w * int(obtained * 1.0 / possible )\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = 
m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\':\n top_package = os.path.dirname(m.__file__)\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = 80\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n sources = {}\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n # json_str = json.dumps(results, indent=4)\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 
import reverse_list, my_sum, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport homework1\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, 
question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n 
except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n if payload is not 
None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar"):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n self._running = False\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n show_progress_bar = True\n nL = 80\n if show_progress_bar:\n tsecs = np.round( self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n current = 1\n possible = 1\n # tsecs = 2\n ss = "PASS" if current == possible else "*** FAILED"\n if tsecs >= 0.1:\n ss += " ("+ str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # 
super().startTest(test)\n self.testsRun += 1\n # print("Starting the test...")\n show_progress_bar = True\n n = 1\n j = 1\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if show_progress_bar:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. 
This differs from the cache.\n _cache = None # Read-only cache.\n _cache2 = None # User-written cache\n\n @classmethod\n def reset(cls):\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n res = testMethod()\n elapsed = time.time() - t\n # if res == None:\n # res = {}\n # res[\'time\'] = elapsed\n sd = self.shortDescription()\n self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n # self._test_fun_output = res\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n # This is my base test class. So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def unique_cache_id(self):\n k0 = self.cache_id()\n key = ()\n for i in itertools.count():\n key = k0 + (i,)\n if not self._cache2_contains(key):\n break\n return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def _cache2_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache2\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n id = self.unique_cache_id()\n if not self._cache_contains(id):\n print("Warning, framework missing key", id)\n\n self.assertEqual(first, self._cache_get(id, first), msg)\n self._cache_put(id, first)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False):\n now = datetime.now()\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n\n # Use the sequential test loader instead. See here:\n class SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n testcase_methods = list(testCaseClass.__dict__.keys())\n test_names.sort(key=testcase_methods.index)\n return test_names\n loader = SequentialTestLoader()\n # loader = unittest.TestLoader()\n # loader.suiteClass = MySuite\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n # print(suite)\n qtitle = q.__name__\n # qtitle = q.title if hasattr(q, "title") else q.id()\n # q.title = qtitle\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. 
Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = possible - len(res.errors)\n\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = w * int(obtained * 1.0 / possible )\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = 
m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = 80\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n sources = {}\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n # json_str = json.dumps(results, indent=4)\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, 
strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]' report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e' name="Report1" diff --git a/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc index bb56042cf06886f37d2a90541707d8d27c8df1ef..7dbbdac7ae33a6d3d1764a740a5f47ff3e00b252 100644 Binary files a/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc and b/examples/example_simplest/students/cs101/__pycache__/homework1.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc b/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc index 06533b81f4a38e074943a390119e27b775bcdb6d..d71c0905a85dfe88c5fe08fe3973d3de7ff67421 100644 Binary files a/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc and b/examples/example_simplest/students/cs101/__pycache__/report1.cpython-38.pyc differ diff --git a/examples/example_simplest/students/cs101/homework1.py b/examples/example_simplest/students/cs101/homework1.py index 6ae4a8b7bd1e5971168e35bf97db41623b33f62c..3543f1ba46b63eec3a2c2e007ee998660c7136c6 100644 --- a/examples/example_simplest/students/cs101/homework1.py +++ b/examples/example_simplest/students/cs101/homework1.py @@ -1,28 +1,17 @@ """ Example student code. This file is automatically generated from the files in the instructor-directory """ -import numpy as np -from sklearn.datasets import load_boston -from sklearn.linear_model import LinearRegression -from sklearn.metrics import mean_squared_error, r2_score - -#################### -# Question 1. Write a function reverse_list which accepts a list, and returns a new list -# with the same elements but in opposite order. -#################### def reverse_list(mylist): - # TODO: 6 lines missing. - raise NotImplementedError("Implement function body") - -def simple_list_question(): - # TODO: 3 lines missing. - raise NotImplementedError("Implement function body") - -def add(a,b): + """ + Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g. + reverse_list([1,2,3]) should return [3,2,1] (as a list). + """ # TODO: 1 lines missing. raise NotImplementedError("Implement function body") -def my_sum(ls): +def add(a,b): + """ Given two numbers `a` and `b` this function should simply return their sum: + > add(a,b) = a+b """ # TODO: 1 lines missing. raise NotImplementedError("Implement function body") diff --git a/examples/example_simplest/students/cs101/report1.py b/examples/example_simplest/students/cs101/report1.py index bcede9c429f1121f3f4e1a542c5b5248183f2233..6c51d24d8254363cec13a39fbbf976ff36d9fc48 100644 --- a/examples/example_simplest/students/cs101/report1.py +++ b/examples/example_simplest/students/cs101/report1.py @@ -3,7 +3,7 @@ Example student code. 
This file is automatically generated from the files in the """ from unitgrade2.unitgrade2 import Report from unitgrade2.unitgrade_helpers2 import evaluate_report_student -from homework1 import reverse_list, my_sum, add +from homework1 import reverse_list, add import unittest class Week1(unittest.TestCase): @@ -14,11 +14,11 @@ class Week1(unittest.TestCase): def test_reverse(self): self.assertEqual(reverse_list([1,2,3]), [3,2,1]) -import homework1 +import cs101 class Report1(Report): title = "CS 101 Report 1" questions = [(Week1, 10)] # Include a single question for 10 credits. - pack_imports = [homework1] + pack_imports = [cs101] if __name__ == "__main__": # Uncomment to simply run everything as a unittest: diff --git a/examples/example_simplest/students/cs101/report1_grade.py b/examples/example_simplest/students/cs101/report1_grade.py index 696b7caf7ae7586eedb511b1a1bcd317ad58e842..4a5f73c87c0d209dc0a60ad0afbe79e13e9974c1 100644 --- a/examples/example_simplest/students/cs101/report1_grade.py +++ b/examples/example_simplest/students/cs101/report1_grade.py @@ -326,10 +326,12 @@ def gather_imports(imp): # dn = os.path.dirname(f) # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = str(__import__(m.__name__.split('.')[0]).__path__) - if m.__class__.__name__ == 'module': + if m.__class__.__name__ == 'module' and False: top_package = os.path.dirname(m.__file__) + module_import = True else: top_package = __import__(m.__name__.split('.')[0]).__path__._path[0] + module_import = False # top_package = os.path.dirname(__import__(m.__name__.split('.')[0]).__file__) # top_package = os.path.dirname(top_package) @@ -350,6 +352,7 @@ def gather_imports(imp): resources['zipfile'] = zip_buffer.getvalue() resources['top_package'] = top_package + resources['module_import'] = module_import return resources, top_package if f.endswith("__init__.py"): @@ -427,7 +430,7 @@ def source_instantiate(name, report1_source, payload): -report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 
+ np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. 
The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n if payload is not None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar"):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n self._running = False\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n show_progress_bar = True\n nL = 80\n if show_progress_bar:\n tsecs = np.round( self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n current = 1\n possible = 1\n # tsecs = 2\n ss = "PASS" if current == possible else "*** FAILED"\n if tsecs >= 0.1:\n ss += " ("+ str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # 
super().startTest(test)\n self.testsRun += 1\n # print("Starting the test...")\n show_progress_bar = True\n n = 1\n j = 1\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if show_progress_bar:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n _cache = None # Read-only cache.\n _cache2 = None # User-written cache\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n res = testMethod()\n elapsed = time.time() - t\n # if res == None:\n # res = {}\n # res[\'time\'] = elapsed\n sd = self.shortDescription()\n self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n # self._test_fun_output = res\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n # This is my base test class. 
So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def unique_cache_id(self):\n k0 = self.cache_id()\n key = ()\n for i in itertools.count():\n key = k0 + (i,)\n if not self._cache2_contains(key):\n break\n return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def _cache2_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache2\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n id = self.unique_cache_id()\n if not self._cache_contains(id):\n print("Warning, framework missing key", id)\n\n self.assertEqual(first, self._cache_get(id, first), msg)\n self._cache_put(id, first)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. 
For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. 
Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False):\n now = datetime.now()\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n\n # Use the sequential test loader instead. 
See here:\n class SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n testcase_methods = list(testCaseClass.__dict__.keys())\n test_names.sort(key=testcase_methods.index)\n return test_names\n loader = SequentialTestLoader()\n # loader = unittest.TestLoader()\n # loader.suiteClass = MySuite\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n # print(suite)\n qtitle = q.__name__\n # qtitle = q.title if hasattr(q, "title") else q.id()\n # q.title = qtitle\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. 
Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = possible - len(res.errors)\n\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = w * int(obtained * 1.0 / possible )\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = 
m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\':\n top_package = os.path.dirname(m.__file__)\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = 80\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n sources = {}\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n # json_str = json.dumps(results, indent=4)\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 
import reverse_list, my_sum, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport homework1\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [homework1]' +report1_source = 'import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . && git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\nimport pickle\nimport itertools\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n weight = 1 # the weight of the question.\n\n def __init__(self, 
question=None, *args, **kwargs):\n if self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n # self._correct_answer_payload = correct_answer_payload\n self.question = question\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print(f"Not equal within tolerance {tol=}; deviation was {np.max(diff)=}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol=}, {np.max(diff)=}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n 
except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Untitled question"\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n has_called_init_ = False\n _name = None\n _items = None\n\n @property\n def items(self):\n if self._items == None:\n self._items = []\n members = [gt for gt in [getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for I in members:\n self._items.append( I(question=self))\n return self._items\n\n @items.setter\n def items(self, value):\n self._items = value\n\n @property\n def name(self):\n if self._name == None:\n self._name = self.__class__.__name__\n return self._name #\n\n @name.setter\n def name(self, val):\n self._name = val\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\n def init_all_item_questions(self):\n for item in self.items:\n if not item.question.has_called_init_:\n item.question.init()\n item.question.has_called_init_ = True\n\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n\n @classmethod\n def reset(cls):\n for (q,_) in cls.questions:\n if hasattr(q, \'reset\'):\n q.reset()\n\n def _file(self):\n return inspect.getfile(type(self))\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(self._file()))\n\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n\n if payload is not 
None:\n self.set_payload(payload, strict=strict)\n # else:\n # if os.path.isfile(self.computed_answers_file):\n # self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n # else:\n # s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. Reasons may be a broken local installation."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n\n def main(self, verbosity=1):\n # Run all tests using standard unittest (nothing fancy).\n import unittest\n loader = unittest.TestLoader()\n for q,_ in self.questions:\n import time\n start = time.time() # A good proxy for setup time is to\n suite = loader.loadTestsFromTestCase(q)\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n total = time.time() - start\n q.time = total\n\n def _setup_answers(self):\n self.main() # Run all tests in class just to get that out of the way...\n report_cache = {}\n for q, _ in self.questions:\n if hasattr(q, \'_save_cache\'):\n q()._save_cache()\n q._cache[\'time\'] = q.time\n report_cache[q.__qualname__] = q._cache\n else:\n report_cache[q.__qualname__] = {\'no cache see _setup_answers in unitgrade2.py\':True}\n return report_cache\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n q._cache = payloads[q.__qualname__]\n\n # for item in q.items:\n # if q.name not in payloads or item.name not in payloads[q.name]:\n # s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n # if strict:\n # raise Exception(s)\n # else:\n # print(s)\n # else:\n # item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n # item.estimated_time = payloads[q.name][item.name].get("time", 1)\n # q.estimated_time = payloads[q.name].get("time", 1)\n # if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n # item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n # try:\n # if "title" in payloads[q.name][item.name]: # can perhaps be removed later.\n # item.title = payloads[q.name][item.name][\'title\']\n # except Exception as e: # Cannot set attribute error. The title is a function (and probably should not be).\n # pass\n # # print("bad", e)\n # self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar"):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n self.n = int(np.round(self.t / self.dt))\n # self.pbar = tqdm.tqdm(total=self.n)\n if start:\n self.start()\n\n def start(self):\n self._running = True\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n self.time_started = time.time()\n\n def terminate(self):\n self._running = False\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n return time.time() - self.time_started\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n\n\nfrom unittest.suite import _isnotsuite\n\nclass MySuite(unittest.suite.TestSuite): # Not sure we need this one anymore.\n pass\n\ndef instance_call_stack(instance):\n s = "-".join(map(lambda x: x.__name__, instance.__class__.mro()))\n return s\n\ndef get_class_that_defined_method(meth):\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\ndef caller_name(skip=2):\n """Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. 
skip=1 means "who calls me", skip=2 "who calls my caller" etc.\n\n An empty string is returned if skipped levels exceed stack height\n """\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return \'\'\n parentframe = stack[start][0]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if \'self\' in parentframe.f_locals:\n # I don\'t know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\'self\'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != \'<module>\': # top level usually\n name.append( codename ) # function or a method\n\n ## Avoid circular refs and frame leaks\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\n del parentframe, stack\n\n return ".".join(name)\n\ndef get_class_from_frame(fr):\n import inspect\n args, _, _, value_dict = inspect.getargvalues(fr)\n # we check the first parameter for the frame function is\n # named \'self\'\n if len(args) and args[0] == \'self\':\n # in that case, \'self\' will be referenced in value_dict\n instance = value_dict.get(\'self\', None)\n if instance:\n # return its class\n # isinstance(instance, Testing) # is the actual class instance.\n\n return getattr(instance, \'__class__\', None)\n # return None otherwise\n return None\n\nfrom typing import Any\nimport inspect, gc\n\ndef giveupthefunc():\n frame = inspect.currentframe()\n code = frame.f_code\n globs = frame.f_globals\n functype = type(lambda: 0)\n funcs = []\n for func in gc.get_referrers(code):\n if type(func) is functype:\n if getattr(func, "__code__", None) is code:\n if getattr(func, "__globals__", None) is globs:\n funcs.append(func)\n if len(funcs) > 1:\n return None\n return funcs[0] if funcs else None\n\n\nfrom collections import defaultdict\n\nclass UTextResult(unittest.TextTestResult):\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self.successes = []\n\n def printErrors(self) -> None:\n # if self.dots or self.showAll:\n # self.stream.writeln()\n self.printErrorList(\'ERROR\', self.errors)\n self.printErrorList(\'FAIL\', self.failures)\n\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n # super().addSuccess(test)\n self.successes.append(test)\n # super().addSuccess(test)\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n show_progress_bar = True\n nL = 80\n if show_progress_bar:\n tsecs = np.round( self.cc.terminate(), 2)\n sys.stdout.flush()\n ss = self.item_title_print\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n current = 1\n possible = 1\n # tsecs = 2\n ss = "PASS" if current == possible else "*** FAILED"\n if tsecs >= 0.1:\n ss += " ("+ str(tsecs) + " seconds)"\n print(ss)\n\n\n def startTest(self, test):\n # 
super().startTest(test)\n self.testsRun += 1\n # print("Starting the test...")\n show_progress_bar = True\n n = 1\n j = 1\n item_title = self.getDescription(test)\n item_title = item_title.split("\\n")[0]\n self.item_title_print = "*** q%i.%i) %s" % (n + 1, j + 1, item_title)\n estimated_time = 10\n nL = 80\n #\n if show_progress_bar:\n self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print)\n else:\n print(self.item_title_print + (\'.\' * max(0, nL - 4 - len(self.item_title_print))), end="")\n\n self._test = test\n\n def _setupStdout(self):\n if self._previousTestClass == None:\n total_estimated_time = 2\n if hasattr(self.__class__, \'q_title_print\'):\n q_title_print = self.__class__.q_title_print\n else:\n q_title_print = "<unnamed test. See unitgrade.py>"\n\n # q_title_print = "some printed title..."\n cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n self.cc = cc\n\n def _restoreStdout(self): # Used when setting up the test.\n if self._previousTestClass == None:\n q_time = self.cc.terminate()\n q_time = np.round(q_time, 2)\n sys.stdout.flush()\n print(self.cc.title, end="")\n # start = 10\n # q_time = np.round(time.time() - start, 2)\n nL = 80\n print(" " * max(0, nL - len(self.cc.title)) + (\n " (" + str(q_time) + " seconds)" if q_time >= 0.1 else "")) # if q.name in report.payloads else "")\n print("=" * nL)\n\nfrom unittest.runner import _WritelnDecorator\nfrom io import StringIO\n\nclass UTextTestRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n from io import StringIO\n stream = StringIO()\n super().__init__(*args, stream=stream, **kwargs)\n\n def _makeResult(self):\n stream = self.stream # not you!\n stream = sys.stdout\n stream = _WritelnDecorator(stream)\n return self.resultclass(stream, self.descriptions, self.verbosity)\n\ndef wrapper(foo):\n def magic(self):\n s = "-".join(map(lambda x: x.__name__, self.__class__.mro()))\n # print(s)\n foo(self)\n magic.__doc__ = foo.__doc__\n return magic\n\nfrom functools import update_wrapper, _make_key, RLock\nfrom collections import namedtuple\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\ndef cache(foo, typed=False):\n """ Magic cache wrapper\n https://github.com/python/cpython/blob/main/Lib/functools.py\n """\n maxsize = None\n def wrapper(self, *args, **kwargs):\n key = self.cache_id() + ("cache", _make_key(args, kwargs, typed))\n if not self._cache_contains(key):\n value = foo(self, *args, **kwargs)\n self._cache_put(key, value)\n else:\n value = self._cache_get(key)\n return value\n return wrapper\n\n\nclass UTestCase(unittest.TestCase):\n _outcome = None # A dictionary which stores the user-computed outcomes of all the tests. 
This differs from the cache.\n _cache = None # Read-only cache.\n _cache2 = None # User-written cache\n\n @classmethod\n def reset(cls):\n cls._outcome = None\n cls._cache = None\n cls._cache2 = None\n\n def _get_outcome(self):\n if not (self.__class__, \'_outcome\') or self.__class__._outcome == None:\n self.__class__._outcome = {}\n return self.__class__._outcome\n\n def _callTestMethod(self, testMethod):\n t = time.time()\n res = testMethod()\n elapsed = time.time() - t\n # if res == None:\n # res = {}\n # res[\'time\'] = elapsed\n sd = self.shortDescription()\n self._cache_put( (self.cache_id(), \'title\'), self._testMethodName if sd == None else sd)\n # self._test_fun_output = res\n self._get_outcome()[self.cache_id()] = res\n self._cache_put( (self.cache_id(), "time"), elapsed)\n\n\n # This is my base test class. So what is new about it?\n def cache_id(self):\n c = self.__class__.__qualname__\n m = self._testMethodName\n return (c,m)\n\n def unique_cache_id(self):\n k0 = self.cache_id()\n key = ()\n for i in itertools.count():\n key = k0 + (i,)\n if not self._cache2_contains(key):\n break\n return key\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._load_cache()\n self.cache_indexes = defaultdict(lambda: 0)\n\n def _ensure_cache_exists(self):\n if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n self.__class__._cache = dict()\n if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n self.__class__._cache2 = dict()\n\n def _cache_get(self, key, default=None):\n self._ensure_cache_exists()\n return self.__class__._cache.get(key, default)\n\n def _cache_put(self, key, value):\n self._ensure_cache_exists()\n self.__class__._cache2[key] = value\n\n def _cache_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache\n\n def _cache2_contains(self, key):\n self._ensure_cache_exists()\n return key in self.__class__._cache2\n\n def assertEqualC(self, first: Any, msg: Any = ...) -> None:\n id = self.unique_cache_id()\n if not self._cache_contains(id):\n print("Warning, framework missing key", id)\n\n self.assertEqual(first, self._cache_get(id, first), msg)\n self._cache_put(id, first)\n\n def _cache_file(self):\n return os.path.dirname(inspect.getfile(self.__class__) ) + "/unitgrade/" + self.__class__.__name__ + ".pkl"\n\n def _save_cache(self):\n # get the class name (i.e. what to save to).\n cfile = self._cache_file()\n if not os.path.isdir(os.path.dirname(cfile)):\n os.makedirs(os.path.dirname(cfile))\n\n if hasattr(self.__class__, \'_cache2\'):\n with open(cfile, \'wb\') as f:\n pickle.dump(self.__class__._cache2, f)\n\n # But you can also set cache explicitly.\n def _load_cache(self):\n if self._cache != None: # Cache already loaded. We will not load it twice.\n return\n # raise Exception("Loaded cache which was already set. What is going on?!")\n cfile = self._cache_file()\n print("Loading cache from", cfile)\n if os.path.exists(cfile):\n with open(cfile, \'rb\') as f:\n data = pickle.load(f)\n self.__class__._cache = data\n else:\n print("Warning! 
data file not found", cfile)\n\ndef hide(func):\n return func\n\ndef makeRegisteringDecorator(foreignDecorator):\n """\n Returns a copy of foreignDecorator, which is identical in every\n way(*), except also appends a .decorator property to the callable it\n spits out.\n """\n def newDecorator(func):\n # Call to newDecorator(method)\n # Exactly like old decorator, but output keeps track of what decorated it\n R = foreignDecorator(func) # apply foreignDecorator, like call to foreignDecorator(method) would have done\n R.decorator = newDecorator # keep track of decorator\n # R.original = func # might as well keep track of everything!\n return R\n\n newDecorator.__name__ = foreignDecorator.__name__\n newDecorator.__doc__ = foreignDecorator.__doc__\n # (*)We can be somewhat "hygienic", but newDecorator still isn\'t signature-preserving, i.e. you will not be able to get a runtime list of parameters. For that, you need hackish libraries...but in this case, the only argument is func, so it\'s not a big issue\n return newDecorator\n\nhide = makeRegisteringDecorator(hide)\n\ndef methodsWithDecorator(cls, decorator):\n """\n Returns all methods in CLS with DECORATOR as the\n outermost decorator.\n\n DECORATOR must be a "registering decorator"; one\n can make any decorator "registering" via the\n makeRegisteringDecorator function.\n\n import inspect\n ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n for f in ls:\n print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n """\n for maybeDecorated in cls.__dict__.values():\n if hasattr(maybeDecorated, \'decorator\'):\n if maybeDecorated.decorator == decorator:\n print(maybeDecorated)\n yield maybeDecorated\n\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\n\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\n\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n\n # try: # For registering stats.\n # import unitgrade_private\n # import irlc.lectures\n # import xlwings\n # from openpyxl import Workbook\n # import pandas as pd\n # from collections import defaultdict\n # dd = defaultdict(lambda: [])\n # error_computed = []\n # for k1, (q, _) in enumerate(report.questions):\n # for k2, item in enumerate(q.items):\n # dd[\'question_index\'].append(k1)\n # dd[\'item_index\'].append(k2)\n # dd[\'question\'].append(q.name)\n # dd[\'item\'].append(item.name)\n # dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n # error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n #\n # qstats = report.wdir + "/" + report.name + ".xlsx"\n #\n # if os.path.isfile(qstats):\n # d_read = pd.read_excel(qstats).to_dict()\n # else:\n # d_read = dict()\n #\n # for k in range(1000):\n # key = \'run_\'+str(k)\n # if key in d_read:\n # dd[key] = list(d_read[\'run_0\'].values())\n # else:\n # dd[key] = error_computed\n # break\n #\n # workbook = Workbook()\n # worksheet = workbook.active\n # for col, key in enumerate(dd.keys()):\n # worksheet.cell(row=1, column=col+1).value = key\n # for row, item in enumerate(dd[key]):\n # worksheet.cell(row=row+2, column=col+1).value = item\n #\n # workbook.save(qstats)\n # workbook.close()\n #\n # except ModuleNotFoundError as e:\n # s = 234\n # pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n\n\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\nclass UnitgradeTextRunner(unittest.TextTestRunner):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False):\n now = datetime.now()\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if hasattr(report, "version") and report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 80\n t_start = time.time()\n score = {}\n\n # Use the sequential test loader instead. See here:\n class SequentialTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n testcase_methods = list(testCaseClass.__dict__.keys())\n test_names.sort(key=testcase_methods.index)\n return test_names\n loader = SequentialTestLoader()\n # loader = unittest.TestLoader()\n # loader.suiteClass = MySuite\n\n for n, (q, w) in enumerate(report.questions):\n # q = q()\n q_hidden = False\n # q_hidden = issubclass(q.__class__, Hidden)\n if question is not None and n+1 != question:\n continue\n suite = loader.loadTestsFromTestCase(q)\n # print(suite)\n qtitle = q.__name__\n # qtitle = q.title if hasattr(q, "title") else q.id()\n # q.title = qtitle\n q_title_print = "Question %i: %s"%(n+1, qtitle)\n print(q_title_print, end="")\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # unittest.Te\n # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]\n UTextResult.q_title_print = q_title_print # Hacky\n res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)\n z = 234\n # for j, item in enumerate(q.items):\n # if qitem is not None and question is not None and j+1 != qitem:\n # continue\n #\n # if q_with_outstanding_init is not None: # check for None bc. this must be called to set titles.\n # # if not item.question.has_called_init_:\n # start = time.time()\n #\n # cc = None\n # if show_progress_bar:\n # total_estimated_time = q.estimated_time # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )\n # cc = ActiveProgress(t=total_estimated_time, title=q_title_print)\n # from unitgrade import Capturing # DON\'T REMOVE THIS LINE\n # with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. of minify issue.\n # try:\n # for q2 in q_with_outstanding_init:\n # q2.init()\n # q2.has_called_init_ = True\n #\n # # item.question.init() # Initialize the question. 
Useful for sharing resources.\n # except Exception as e:\n # if not passall:\n # if not silent:\n # print(" ")\n # print("="*30)\n # print(f"When initializing question {q.title} the initialization code threw an error")\n # print(e)\n # print("The remaining parts of this question will likely fail.")\n # print("="*30)\n #\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(q_title_print, end="")\n #\n # q_time =np.round( time.time()-start, 2)\n #\n # print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n # print("=" * nL)\n # q_with_outstanding_init = None\n #\n # # item.question = q # Set the parent question instance for later reference.\n # item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n #\n # if show_progress_bar:\n # cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n # else:\n # print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n # hidden = issubclass(item.__class__, Hidden)\n # # if not hidden:\n # # print(ss, end="")\n # # sys.stdout.flush()\n # start = time.time()\n #\n # (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n # q_[j] = {\'w\': item.weight, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n # tsecs = np.round(time.time()-start, 2)\n # if show_progress_bar:\n # cc.terminate()\n # sys.stdout.flush()\n # print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n #\n # if not hidden:\n # ss = "PASS" if current == possible else "*** FAILED"\n # if tsecs >= 0.1:\n # ss += " ("+ str(tsecs) + " seconds)"\n # print(ss)\n\n # ws, possible, obtained = upack(q_)\n\n possible = res.testsRun\n obtained = possible - len(res.errors)\n\n\n # possible = int(ws @ possible)\n # obtained = int(ws @ obtained)\n # obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n\n obtained = w * int(obtained * 1.0 / possible )\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'title\': qtitle}\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport os\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = 
m.__file__\n # dn = os.path.dirname(f)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = str(__import__(m.__name__.split(\'.\')[0]).__path__)\n if m.__class__.__name__ == \'module\' and False:\n top_package = os.path.dirname(m.__file__)\n module_import = True\n else:\n top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n module_import = False\n\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(top_package)\n import zipfile\n # import strea\n # zipfile.ZipFile\n import io\n # file_like_object = io.BytesIO(my_zip_data)\n zip_buffer = io.BytesIO()\n with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n # zip.write()\n for root, dirs, files in os.walk(top_package):\n for file in files:\n if file.endswith(".py"):\n fpath = os.path.join(root, file)\n v = os.path.relpath(os.path.join(root, file), os.path.dirname(top_package))\n zip.write(fpath, v)\n\n resources[\'zipfile\'] = zip_buffer.getvalue()\n resources[\'top_package\'] = top_package\n resources[\'module_import\'] = module_import\n return resources, top_package\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = 80\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n sources = {}\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for k, m in enumerate(report.pack_imports):\n nimp, top_package = gather_imports(m)\n report_relative_location = os.path.relpath(inspect.getfile(report.__class__), top_package)\n nimp[\'report_relative_location\'] = report_relative_location\n nimp[\'name\'] = m.__name__\n sources[k] = nimp\n # if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n # sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n # json_str = json.dumps(results, indent=4)\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n with open(token, \'wb\') as f:\n pickle.dump(results, f)\n\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, 
strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.9.0"\n\nfrom homework1 import reverse_list, add\nimport unittest\n\nclass Week1(unittest.TestCase):\n def test_add(self):\n self.assertEqual(add(2,2), 4)\n self.assertEqual(add(-100, 5), -95)\n\n def test_reverse(self):\n self.assertEqual(reverse_list([1,2,3]), [3,2,1])\n\nimport cs101\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(Week1, 10)] # Include a single question for 10 credits.\n pack_imports = [cs101]'
report1_payload = '8004953f000000000000007d948c055765656b31947d948c2c6e6f20636163686520736565205f73657475705f616e737765727320696e20756e69746772616465322e7079948873732e'
name="Report1"
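
Note on the generated grader above: evaluate_report runs each question's test suite and records a per-question score dict, gather_imports zips the relevant package sources, and gather_upload_to_campusnet pickles the combined results (keys 'total' and 'details', plus 'sources' when files are packed) into a "<ReportName>_handin_<obtained>_of_<possible>.token" file. A minimal sketch of how such a token could be inspected afterwards; the file name used here is a hypothetical example, not something produced by this diff:

# Sketch: inspecting a generated .token file.
# Assumes a hypothetical token "Report1_handin_10_of_10.token" in the working directory.
import pickle

with open("Report1_handin_10_of_10.token", "rb") as f:
    results = pickle.load(f)

obtained, possible = results['total']            # overall score, as packed by gather_upload_to_campusnet
print(f"Score: {obtained}/{possible}")
for n, q in results['details'].items():          # per-question entries written by evaluate_report
    print(f"Question q{n + 1} ({q['title']}): {q['obtained']}/{q['possible']}")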