diff --git a/requirements.txt b/requirements.txt
index 5fcd9059477be6d2e794970466b92d320122be00..14d8388e06a39ff23deaf10219a96e164461396a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,3 +10,4 @@ colorama
 numpy
 scikit_learn
 snipper
+requests # To read remote files for automatic updating.
diff --git a/src/unitgrade.egg-info/PKG-INFO b/src/unitgrade.egg-info/PKG-INFO
index 772041a71b27c560a01dc13606d14fa7ddcd8803..05dd3dfef46325d46efc7deb04762f75acb732a4 100644
--- a/src/unitgrade.egg-info/PKG-INFO
+++ b/src/unitgrade.egg-info/PKG-INFO
@@ -1,13 +1,12 @@
 Metadata-Version: 2.1
 Name: unitgrade
-Version: 0.1.21
+Version: 0.1.23
 Summary: A student homework/exam evaluation framework build on pythons unittest framework.
 Home-page: https://lab.compute.dtu.dk/tuhe/unitgrade
 Author: Tue Herlau
 Author-email: tuhe@dtu.dk
 License: MIT
 Project-URL: Bug Tracker, https://lab.compute.dtu.dk/tuhe/unitgrade/issues
-Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -15,61 +14,68 @@ Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-
 # Unitgrade
-Unitgrade is an automatic report and exam evaluation framework that enables instructors to offer automatically evaluated programming assignments.
- Unitgrade is build on pythons `unittest` framework so that the tests can be specified in a familiar syntax and will integrate with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation) and an easy and 100% safe mechanism for verifying the students results and creating additional, hidden tests. A powerful cache system allows instructors to automatically create test-answers based on a working solution.
+Unitgrade is an automatic software testing framework that enables instructors to offer automatically evaluated programming assignments with minimal overhead for students.
+
+Unitgrade is built on Python's `unittest` framework so that the tests can be specified and run in a familiar syntax,
+ and will integrate well with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation)
+and an easy and safe mechanism for verifying results.

 - 100% Python `unittest` compatible
- - No external configuration files, just write a `unittest`
- - No unnatural limitations: If you can `unittest` it, it works.
- - Granular security model:
-     - Students get public `unittests` for easy development of solutions
-     - Students get a tamper-resistant file to create submissions which are uploaded
-     - Instructors can automatically verify the students solution using Docker VM and by running hidden tests
- - Allow export of assignments to Autolab (no `make` file mysteries!)
- - Tests are quick to run and will integrate with your IDE
+ - Integrates with any modern IDE (VS Code, PyCharm, Eclipse)
+ - No external configuration files or setup required
+ - Tests are quick to run and will tell you where your mistake is
+ - Hint system: hints are collected from the code and shown together with failed unit tests

 ## Installation
-Unitgrade can be installed using `pip`:
-```
+Unitgrade is installed like any other package using `pip`:
+```terminal
 pip install unitgrade
 ```
-This will install unitgrade in your site-packages directory. If you want to upgrade an old installation of unitgrade:
-```
-pip install unitgrade --upgrade
-```
-If you are using anaconda+virtual environment you can install it as
+This will install unitgrade in your site-packages directory, and you should be all set. If you want to upgrade an old version of unitgrade, run:
+```terminal
+pip install unitgrade --upgrade --no-cache-dir
 ```
+If you are using anaconda with a virtual environment, you can install it like any other package:
+```terminal
 source activate myenv
 conda install git pip
 pip install unitgrade
 ```
-
-When you are done, you should be able to import unitgrade:
-```
-import unitgrade
+When you are done, you should be able to import unitgrade. Type `python` in the terminal and try:
+```pycon
+>>> import unitgrade
 ```
-## Evaluating a report
-Homework is broken down into **reports**. A report is a collection of questions which are individually scored, and each question may in turn involve multiple tests. Each report is therefore given an overall score based on a weighted average of how many tests are passed.
-In practice, a report consist of an ordinary python file which they simply run. It looks like this (to run this on your local machine, follow the instructions in the previous section):
+## Using Unitgrade
+In unitgrade, your homework assignments are called **reports** and are distributed as regular `.py`-files. I am going to use `cs101report1.py` as a generic example in the following, but a real-world example can be found here: https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/blob/master/examples/example_simplest/students/cs101/report1.py .
+
+The report is simply a collection of questions which are individually scored, and each question may in turn involve checking several sub-cases.
+
+You should think of the tests as a help when you are debugging your code and trying to figure out what to do.
+I recommend running the tests through your IDE. In PyCharm, this is as simple as right-clicking on the test and selecting `Run as unittest`. The image below shows the outcome in PyCharm:
+
+
+The tests are shown in the lower-left corner, and in this case they are all green, meaning they have passed. If a test fails, you can right-click it and select `debug as unittest`, click on it to see the output it produced, or right-click individual tests to re-run them.
+
+### Checking your score
+To check your score, you have to run the main script (`cs101report1.py`) as a regular Python file. This can be done either through PyCharm (Hint: Open the file and press `alt-shift-F10`) or in the console by running the command:
 ```
 python cs101report1.py
 ```
-The file `cs101report1.py` is just an ordinary, non-obfuscated file which they can navigate and debug using a debugger. The file may contain the homework, or it may call functions the students have written. Running the file creates console output which tells the students their current score for each test:
-
+The file will run and show an output where the score of each question is computed as a (weighted) average of the individual passed tests.
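To give a concrete picture of what is inside such a report file, here is a minimal sketch. All names (`Week1`, `cs101.homework1`, `add`) are hypothetical and only for illustration; real assignments ship with their own questions, but they follow the same `UTestCase`/`Report` structure that unitgrade provides:

```python
# Hypothetical, stripped-down report file -- a sketch, not an actual assignment.
import cs101                                    # hypothetical package containing your homework code
from unitgrade import UTestCase, Report

class Week1(UTestCase):
    """ A question is an ordinary unittest test case; each test method checks one sub-case. """
    def test_add(self):
        from cs101.homework1 import add         # hypothetical function you are asked to write
        self.assertEqual(add(2, 2), 4)          # plain unittest assertions are used throughout

class Report1(Report):
    title = "CS 101 Report 1"
    questions = [(Week1, 10)]                   # list of (question class, point weight)
    pack_imports = [cs101]                      # package(s) with the code the report should include
```

Because the questions are ordinary `unittest` test cases, the run/debug-as-unittest integration described above works on them directly.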
An example is given below: ```terminal _ _ _ _ _____ _ | | | | (_) | | __ \ | | | | | |_ __ _| |_| | \/_ __ __ _ __| | ___ | | | | '_ \| | __| | __| '__/ _` |/ _` |/ _ \ | |_| | | | | | |_| |_\ \ | | (_| | (_| | __/ - \___/|_| |_|_|\__|\____/_| \__,_|\__,_|\___| v0.0.3, started: 07/09/2021 00:42:25 + \___/|_| |_|_|\__|\____/_| \__,_|\__,_|\___| v0.1.22, started: 19/05/2022 15:16:20 Week 4: Looping (use --help for options) -Question 1: Test the cluster analysis method +Question 1: Cluster analysis * q1.1) clusterAnalysis([0.8, 0.0, 0.6]) = [1, 2, 1] ?.............................................................PASS * q1.2) clusterAnalysis([0.5, 0.6, 0.3, 0.3]) = [2, 2, 1, 1] ?.....................................................PASS * q1.3) clusterAnalysis([0.2, 0.7, 0.3, 0.5, 0.0]) = [1, 2, 1, 2, 1] ?.............................................PASS @@ -92,14 +98,14 @@ Question 3: Bacteria growth rates * q3.5) bacteriaGrowth(100, 0.4, 1000, 99) = 0 ?...................................................................PASS * q3) Total.................................................................................................... 10/10 -Question 4: Test the fermentation rate question +Question 4: Fermentation rate * q4.1) fermentationRate([20.1, 19.3, 1.1, 18.2, 19.7, ...], 15, 25) = 19.600 ?....................................PASS * q4.2) fermentationRate([20.1, 19.3, 1.1, 18.2, 19.7, ...], 1, 200) = 29.975 ?....................................PASS * q4.3) fermentationRate([1.75], 1, 2) = 1.750 ?...................................................................PASS * q4.4) fermentationRate([20.1, 19.3, 1.1, 18.2, 19.7, ...], 18.2, 20) = 19.500 ?..................................PASS * q4) Total.................................................................................................... 10/10 -Total points at 00:42:25 (0 minutes, 0 seconds)....................................................................40/40 +Total points at 15:16:20 (0 minutes, 0 seconds)....................................................................40/40 Provisional evaluation --------- ----- q1) Total 10/10 @@ -115,38 +121,28 @@ To register your results, please run the file: In the same manner as you ran this file. ``` -Once you are happy with the result run the script with the `_grade.py`-postfix, in this case `cs101report1_grade.py`: - +### Handing in your homework +Once you are happy with your results and want to hand in, you should run the script with the `_grade.py`-postfix, in this case `cs101report1_grade.py` (see console output above): ``` python cs101report1_grade.py ``` -This runs the same tests, and generates a file `Report0_handin_18_of_18.token`. The file name indicates how many points you got. Upload this file to campusnet (and no other). - -## Running the tests in pycharm -Naturally, you can also run the tests in pycharm, and this offers you a lot of cool features such as integration with the debugger and the ability to see which tests have failed. -To do this, simply right-click on the `report.py`-file and select `Run as unittest` (or alternatively, `debug as unittest`). This will take you to a screen such as shown below: - - - -You can see all tests are green indicating they all pass. If you click on a test you can see the console output it generates and you can -right-click on the tests to re-run individual tests. - +This script will run *the same tests as before* and generates a file named `Report0_handin_18_of_18.token` (this is called the `token`-file because of the extension). 
The token-file contains all your results, and it is the token-file you should upload (and no other). Since you cannot (and most definitely should not!) edit it, the number of points you obtained is shown directly in the file-name.

 ### Why are there two scripts?
-The reason why we use a standard test script, and one with the `_grade.py` extension, is because the tests should both be easy to debug, but at the same time we have to prevent accidential changes to the test scripts. Hence, we include two versions of the tests.
+The reason we use two test scripts (one with the `_grade.py` extension and one without) is that the tests should be easy to debug, but at the same time we have to avoid accidental changes to the test scripts. The tests themselves are the same, so if one script works, so should the other.

 # FAQ

 - **My non-grade script and the `_grade.py` script gives different number of points**
 Since the two scripts should contain the same code, the reason is nearly certainly that you have made an (accidental) change to the test scripts. Please ensure both scripts are up-to-date and if the problem persists, try to get support.

 - **Why is there a `unitgrade` directory with a bunch of pickle files? Should I also upload them?**
-No. The file contains the pre-computed test results your code is compared against. If you want to load this file manually, the unitgrade package contains helpful functions for doing so.
+No. The file contains the pre-computed test results your code is compared against. You should only upload the `.token` file, nothing else.

 - **I am worried you might think I cheated because I opened the '_grade.py' script/token file**
 This should not be a concern. Both files are in a binary format (i.e., if you open them in a text editor they look like garbage), which means that if you make an accidential change, they will with all probability simply fail to work.

 - **I think I might have edited the `report1.py` file. Is this a problem since one of the tests have now been altered?**
-Feel free to edit/break this file as much as you like if it helps you work out the correct solution. In fact, I recommend you just run `report1.py` from your IDE and use the debugger to work out the current state of your program. However, since the `report1_grade.py` script contains a seperate version of the tests, please ensure your `report1.py` file is up to date.
+Feel free to edit/break this file as much as you like if it helps you work out the correct solution. However, since the `report1_grade.py` script contains a separate version of the tests, please ensure both files are in sync to avoid unexpected behavior.

 ### Debugging your code/making the tests pass
 The course material should contain information about the intended function of the scripts used in the tests, and the file `report1.py` should mainly be used to check which of your code is being run. In other words, first make sure your code solves the exercises, and only later run the test script which is less easy/nice to read.
@@ -154,8 +150,8 @@ However, obivously you might get to a situation where your code seems to work, b
 - **I am 99% sure my code is correct, but the test still fails. Why is that?**
 The testing framework offers a great deal of flexibility in terms of what is compared. This is either: (i) The value a function returns, (ii) what the code print to the console (iii) something derived from these.
- Since the test *might* compare the console output, i.e. what you generate using `print("...")`-statements, innnocent changes to the script, like an extra print statement, can cause the test to fail, which is counter-intuitive. For this reason, please look at the error message carefully (or the code in `report1.py`) to understand what is being compared.
-
+When a test fails, you should always try to insert a breakpoint on exactly the line that generates the problem, run the test in the debugger, and figure out what the expected result was supposed to be. This should give you a clear hint as to what may be wrong.
+
 One possibility that might trick some is that if the test compares a value computed by your code, the datatype of that value is important. For instance, a `list` is not the same as a python `ndarray`, and a `tuple` is different from a `list`. This is the correct behavior of a test: These things are not alike and correct code should not confuse them.

 - **The `report1.py` class is really confusing. I can see the code it runs on my computer, but not the expected output. Why is it like this?**
@@ -165,10 +161,7 @@ To make sure the desired output of the tests is always up to date, the tests are
 There are a number of console options available to help you figure out what your program should output and what it currently outputs. They can be found using:
 ```python report1.py --help```
 Note these are disabled for the `report1_grade.py` script to avoid confusion. It is not recommended you use the grade script to debug your code.
-
- - **How do I see the output generated by my scripts in the IDE?**
-The file `unitgrade/unitgrade.py` contains all relevant information. Look at the `QItem` class and the function `get_points`, which is the function that strings together all the tests.
-
+
 - **Since I cannot read the `.token` file, can I trust it contains the same number of points internally as the file name indicate?**
 Yes.
@@ -177,25 +170,23 @@ Yes.

 That the script `report1_grade.py` is difficult to read is not the principle safety measure. Instead, it ensures there is no accidential tampering. If you muck around with these files and upload the result, we will very likely know.

 - **I have private data on my computer. Will this be read or uploaded?**
-No. The code will look for and upload your solutions, but it will not read/look at other directories in your computer. In the example provided with this code, this means you should expect unitgrade to read/run all files in the `cs101courseware_example`-directory, but **no** other files on your computer. So as long as you keep your private files out of the base courseware directory, you should be fine.
+No. The code will look for and upload your solutions, but it will not read/look at other directories on your computer. As long as you keep your private files out of the directory containing your homework, you have nothing to worry about.

 - **Does this code install any spyware/etc.? Does it communicate with a website/online service?**
-No. Unitgrade makes no changes outside the courseware directory and it does not do anything tricky. It reads/runs code and write the `.token` file.
+No. Unitgrade makes no changes outside the courseware directory and it does not do anything tricky. It reads/runs code and produces the `.token` file.

 - **I still have concerns about running code on my computer I cannot easily read**
 Please contact me and we can discuss your specific concerns.
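A final debugging tip related to the questions above: since the tests are plain `unittest` test cases, a single question class or a single test method can also be run directly from the terminal with Python's built-in test runner. The module, class and method names below are the hypothetical ones from the sketch earlier; substitute the names from your own report file:

```terminal
# run all tests in one question class, or a single test method, verbosely
python -m unittest -v cs101report1.Week1
python -m unittest -v cs101report1.Week1.test_add
```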
- # Citing ```bibtex @online{unitgrade, - title={Unitgrade (0.0.3): \texttt{pip install unitgrade}}, + title={Unitgrade (0.1.22): \texttt{pip install unitgrade}}, url={https://lab.compute.dtu.dk/tuhe/unitgrade}, - urldate = {2021-09-07}, + urldate = {2022-05-19}, month={9}, publisher={Technical University of Denmark (DTU)}, author={Tue Herlau}, - year={2021}, + year={2022}, } ``` - diff --git a/src/unitgrade/evaluate.py b/src/unitgrade/evaluate.py index d6efdd88adb3be143f7f48f470153c443c28ab67..ab805b8595409ea77fb32fdcd5ffcd96de73e93e 100644 --- a/src/unitgrade/evaluate.py +++ b/src/unitgrade/evaluate.py @@ -90,10 +90,6 @@ def upack(q): h = np.asarray(h) return h[:,0], h[:,1], h[:,2], -# class UnitgradeTextRunner(unittest.TextTestRunner): -# def __init__(self, *args, **kwargs): -# super().__init__(*args, **kwargs) - class SequentialTestLoader(unittest.TestLoader): def getTestCaseNames(self, testCaseClass): test_names = super().getTestCaseNames(testCaseClass) @@ -121,9 +117,10 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa dt_string = now.strftime("%d/%m/%Y %H:%M:%S") print(b + " v" + __version__ + ", started: " + dt_string+ "\n") # print("Started: " + dt_string) + report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync. s = report.title if hasattr(report, "version") and report.version is not None: - s += " version " + report.version + s += f" version {report.version}" print(s, "(use --help for options)" if show_help_flag else "") # print(f"Loaded answers from: ", report.computed_answers_file, "\n") table_data = [] diff --git a/src/unitgrade/framework.py b/src/unitgrade/framework.py index badb2d45ba89d597c131d6aed192f7ef38d9c493..ce186799373950d3ec6e895e2f09233686c22583 100644 --- a/src/unitgrade/framework.py +++ b/src/unitgrade/framework.py @@ -8,11 +8,14 @@ import colorama import unittest import time import textwrap +import urllib.parse +import requests +import ast +import numpy from unitgrade.runners import UTextResult from unitgrade.utils import gprint, Capturing2, Capturing colorama.init(autoreset=True) # auto resets your settings after every output -import numpy numpy.seterr(all='raise') @@ -24,13 +27,17 @@ def setup_dir_by_class(C, base_dir): class Report: title = "report title" abbreviate_questions = False # Should the test items start with 'Question ...' or just be q1). + version = None # A version number of the report (1.0). Used to compare version numbers with online resources. + url = None # Remote location of this problem. - version = None questions = [] pack_imports = [] individual_imports = [] + + _remote_check_cooldown_seconds = 1 # Seconds between remote check of report. nL = 120 # Maximum line width _config = None # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation. + _setup_mode = False # True if test is being run in setup-mode, i.e. will not fail because of bad configurations, etc. @classmethod def reset(cls): @@ -45,6 +52,10 @@ class Report: def _file(self): return inspect.getfile(type(self)) + def _is_run_in_grade_mode(self): + """ True if this report is being run as part of a grade run. """ + return self._file().endswith("_grade.py") # Not sure I love this convention. + def _import_base_relative(self): if hasattr(self.pack_imports[0], '__path__'): root_dir = self.pack_imports[0].__path__[0] @@ -72,7 +83,7 @@ class Report: # Run all tests using standard unittest (nothing fancy). 
         loader = unittest.TestLoader()
         for q, _ in self.questions:
-            start = time.time()  # A good proxy for setup time is to
+            start = time.time()
             suite = loader.loadTestsFromTestCase(q)
             unittest.TextTestRunner(verbosity=verbosity).run(suite)
             total = time.time() - start
@@ -110,6 +121,109 @@ class Report:
             q._cache = payloads[q.__qualname__]
         self._config = payloads['config']

+    def _check_remote_versions(self):
+        if self.url is None:
+            return
+        url = self.url
+        if not url.endswith("/"):
+            url += "/"
+        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"
+
+        # Only contact the remote repository if the previous check is older than the cooldown period.
+        if os.path.isfile(snapshot_file):
+            with open(snapshot_file, 'r') as f:
+                t = f.read()
+                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:
+                    return
+
+        if self.url.startswith("https://gitlab"):
+            # Turn the url into a 'raw' format, e.g.
+            # https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false
+            url = url.replace("-/tree", "-/raw")
+            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")
+            if self._is_run_in_grade_mode():
+                # In grade mode, the full source of the grade script must agree with the remote version.
+                remote_source = requests.get(raw_url).text
+                with open(self._file(), 'r') as f:
+                    local_source = f.read()
+                if local_source != remote_source:
+                    print("\nThe local version of this report is not identical to the remote version, which can be found at")
+                    print(self.url)
+                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")
+                    print("You should check if there was an announcement and update the test to the most recent version; most likely")
+                    print("this can be done by running the command")
+                    print("> git pull")
+                    print("You can find the most recent code here:")
+                    print(self.url)
+                    raise Exception("The version of the grade script does not match the remote version. Please update using git pull.")
+            else:
+                # Otherwise only compare the `version` field of the report class, read from the remote source using ast.
+                text = requests.get(raw_url).text
+                node = ast.parse(text)
+                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]
+                version_remote = None  # Fall back to None if the remote file defines no version.
+                for b in classes.body:
+                    if isinstance(b, ast.Assign) and getattr(b.targets[0], "id", None) == "version":
+                        version_remote = b.value.value
+                        break
+                if version_remote != self.version:
+                    print("\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)
+                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")
+                    print("You should check if there was an announcement and update the test to the most recent version; most likely")
+                    print("this can be done by running the command")
+                    print("> git pull")
+                    print("You can find the most recent code here:")
+                    print(self.url)
+                    raise Exception(f"The version of the test on the remote is {version_remote}, which is different from this version of the test ({self.version}). Please update your test to the most recent version.")
+
+        for (q, _) in self.questions:
+            qq = q(skip_remote_check=True)
+            cfile = qq._cache_file()
+
+            relpath = os.path.relpath(cfile, os.path.dirname(self._file()))
+            relpath = relpath.replace("\\", "/")
+            raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")
+
+            with open(cfile, 'rb') as f:
+                b1 = f.read()
+
+            b2 = requests.get(raw_url).content
+            if b1 != b2:
+                print("\nQuestion", qq.title, "relies on the data file", cfile)
+                print("However, it appears that this file is missing or in a different version than the most recent one found here:")
+                print(self.url)
+                print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")
+                print("You should check if there was an announcement and update the test to the most recent version; most likely")
+                print("this can be done by simply running the command")
+                print("> git pull")
+                print("To avoid running bad tests against good code, the program will now stop. Please update and good luck!")
+                raise Exception(f"The data file for the question {qq.title} did not match the remote source found on git. The test will therefore automatically fail. Please update your test/data files.")
+
+        # Remember when the remote was last checked so the check is not repeated on every run.
+        t = time.time()
+        if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):
+            with open(snapshot_file, 'w') as f:
+                f.write(f"{t}")

 def get_hints(ss):
     if ss == None:
@@ -143,7 +257,6 @@ class UTestCase(unittest.TestCase):
     # during setup, and the deploy script must be run many times.
     _setup_answers_mode = False

-
     def capture(self):
         if hasattr(self, '_stdout') and self._stdout is not None:
             file = self._stdout
@@ -165,6 +278,7 @@ class UTestCase(unittest.TestCase):
     @classmethod
     def reset(cls):
         print("Warning, I am not sure UTestCase.reset() is needed anymore and it seems very hacky.")
+        raise Exception("reset called in test case. This method is deprecated.")
         cls._outcome = None
         cls._cache = None
         cls._cache2 = None
@@ -258,9 +372,6 @@ class UTestCase(unittest.TestCase):
                     cc[rel][fun] = (l, "\n".join(comments))
                     # print("found", rel, fun)
             self._cache_put((self.cache_id(), 'coverage'), self._covcache)
-        # print("ending loop B")
-        # print("At end of outer loop A")
-        # print("-------------------------------------------- Tear down called")

     def shortDescriptionStandard(self):
         sd = super().shortDescription()
@@ -304,10 +415,44 @@ class UTestCase(unittest.TestCase):
             m = self._testMethodName
         return c, m

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args, skip_remote_check=False, **kwargs):
         super().__init__(*args, **kwargs)
         self._load_cache()
         self._assert_cache_index = 0
+        # Perhaps do a sanity check here to see if the cache is up to date? To do that, we must make sure the
+        # cache exists locally.
+        if skip_remote_check:
+            return
+        import importlib, inspect
+        # Find the report class this class is defined within.
+        # Note: this will delegate you to the wrong main class when running in grade mode.
+        found_reports = []
+        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):
+            if issubclass(cls, Report):
+                for q, _ in cls.questions:
+                    if q == self.__class__:
+                        found_reports.append(cls)
+        if len(found_reports) == 0:
+            pass  # This case occurs when the report _grade script is being run.
+        if len(found_reports) > 1:
+            raise Exception("This question is a member of multiple reports. That should not be the case -- don't get too creative.")
+        if len(found_reports) > 0:
+            report = found_reports[0]
+            report()._check_remote_versions()
+
     def _ensure_cache_exists(self):
         if not hasattr(self.__class__, '_cache') or self.__class__._cache == None:
@@ -378,10 +523,7 @@ class UTestCase(unittest.TestCase):
         assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.
         a1[np.isinf(a1)] = 0
         a2[np.isinf(a2)] = 0
-
         diff = np.abs(a1 - a2)
-
-        # print(a1, a2, diff)
         return diff
@@ -455,19 +597,15 @@
         cfile = self._cache_file()
         if os.path.exists(cfile):
             try:
-                # print("\ncache file", cfile)
                 with open(cfile, 'rb') as f:
                     data = pickle.load(f)
                 self.__class__._cache = data
             except Exception as e:
-                print("Bad cache", cfile)
+                print("Cache file exists but could not be loaded (bad cache):", cfile)
                 print(e)
         else:
             print("Warning! data file not found", cfile)
-
-    # def _feedFailuresToResult(self, result, errors):
-    #     print("asdfdf")
-
     def _feedErrorsToResult(self, result, errors):
         """ Use this to show hints on test failure. """
         if not isinstance(result, UTextResult):
@@ -492,7 +630,7 @@
                     gprint(f"> - {l}")

         er = er[0]
-
+
         doc = er._testMethodDoc
         # print("doc", doc)
         if doc is not None:
@@ -519,6 +657,20 @@ def startTestRun(self):
         super().startTestRun()

+class Required:
+    pass
+
+class ParticipationTest(UTestCase, Required):
+    max_group_size = None
+    students_in_group = None
+    workload_assignment = {'Question 1': [1, 0, 0]}
+
+    def test_students(self):
+        pass
+
+    def test_workload(self):
+        pass
+
 # 817, 705
 class NotebookTestCase(UTestCase):
     notebook = None
diff --git a/src/unitgrade/version.py b/src/unitgrade/version.py
index 3f671b2d626ecf65a57b2c563dc8d04546f19769..272bafdb5017dfe3417cda6bcaa162ea26bc5754 100644
--- a/src/unitgrade/version.py
+++ b/src/unitgrade/version.py
@@ -1 +1 @@
-__version__ = "0.1.22"
\ No newline at end of file
+__version__ = "0.1.23"
\ No newline at end of file
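For context on the `framework.py` change above: the remote check rewrites the report's GitLab folder URL (`Report.url`) into a "raw" file URL and fetches each file with `requests`. Below is a minimal standalone sketch of that URL rewriting; the repository URL is hypothetical and only meant to illustrate the convention used in `_check_remote_versions`:

```python
import urllib.parse

# Hypothetical value of Report.url: the GitLab folder that holds the report file.
url = "https://gitlab.compute.dtu.dk/tuhe/example_course/-/tree/master/students/cs101"
report_file = "report1.py"

if not url.endswith("/"):
    url += "/"
# GitLab serves raw file contents from '-/raw/...' rather than '-/tree/...'.
raw_url = urllib.parse.urljoin(url.replace("-/tree", "-/raw"), report_file + "?inline=false")
print(raw_url)
# https://gitlab.compute.dtu.dk/tuhe/example_course/-/raw/master/students/cs101/report1.py?inline=false
```

The grade script's own source, the report's `version` field, and the cached data files under `unitgrade_data/` are then fetched from such raw URLs and compared against the local copies before the tests are run.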