diff --git a/setup.py b/setup.py
index 4111242502ab690d7888a41ef564d840f0547101..175b207b9361ddfb9395ea4a54853c1398fd8517 100644
--- a/setup.py
+++ b/setup.py
@@ -3,6 +3,8 @@ Use this guide: https://packaging.python.org/tutorials/packaging-projects/
 py -m build && twine upload dist/*
+linux: python -m build && twine upload dist/*
+
 git add . && git commit -m"updates" && git push
 sudo pip install -e ./
 """
diff --git a/src/unitgrade.egg-info/PKG-INFO b/src/unitgrade.egg-info/PKG-INFO
index f0b3f814c4ae65f711abbbb839ad1201074a617b..6555d1d1ff85394f335200889482a24736a4a3a8 100644
--- a/src/unitgrade.egg-info/PKG-INFO
+++ b/src/unitgrade.egg-info/PKG-INFO
@@ -1,13 +1,12 @@
 Metadata-Version: 2.1
 Name: unitgrade
-Version: 0.1.28.8
+Version: 0.1.30.1
 Summary: A student homework/exam evaluation framework build on pythons unittest framework.
 Home-page: https://lab.compute.dtu.dk/tuhe/unitgrade
 Author: Tue Herlau
 Author-email: tuhe@dtu.dk
 License: MIT
 Project-URL: Bug Tracker, https://lab.compute.dtu.dk/tuhe/unitgrade/issues
-Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -16,29 +15,44 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 
 # Unitgrade
+Unitgrade is an autograding framework which enables instructors to offer automatically evaluated programming assignments in a maximally convenient format for the students.
 
-Unitgrade is an automatic software testing framework that enables instructors to offer automatically evaluated programming assignments with a minimal overhead for students.
-
-Unitgrade is build on pythons `unittest` framework so that the tests can be specified and run in a familiar syntax,
- and will integrate well with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation)
+Unitgrade is built on Python's `unittest` framework; i.e., you can directly use your existing unit tests without any changes. It will therefore integrate well with any modern IDE. What it offers beyond `unittest` is the ability to collect tests in reports (for automatic evaluation)
 and an easy and safe mechanism for verifying results.
 
  - 100% Python `unittest` compatible
  - Integrates with any modern IDE (VSCode, Pycharm, Eclipse)
- - No external configuration files or setup required
- - Tests are quick to run and will tell you where your mistake is
+ - No external configuration files
 - Hint-system collects hints from code and displays them with failed unit tests
+ - A dashboard gives the students an overview of their progress
+ - Safe and convenient to administer
+
+### Why this instead of an online autograder?
+Online autograding services often say that they have adopted their particular model in order to make students better or happier. I did a small thought-experiment and asked myself what I would ideally want out of an autograder if I were a student. I quickly realized that the only thing I really cared about was how easily it allowed me to fix bugs in my homework assignments. In other words, I think students prioritize the same thing as we all do when we write software tests -- to quickly and easily identify and fix problems.
+
+However, I would not use an online autograder for any of my own software projects, for a number of reasons:
+ - Why would I want my tests to be executed in an environment other than my development environment?
+ - Why would I want to copy-paste code online (or rely on a sub-standard web-IDE without autocomplete)?
+ - The lack of a debugger would drive me nuts
+ - Why `alt+tab` to an external tool when my IDE already has excellent test plugins?
+ - *Your test will be run within a few minutes*
+ - Something as basic as `print`-statements is often not readily available; I don't know of any service that shows them live
+ - Often students have no access to the tests themselves, perhaps because they rely on special terminal commands. This means it can be hard to reason about what code is *actually* being run.
+
+This raises the question: if I would not want to use an online autograder to fix issues in my own software projects, why should students prefer one?
+
+The alternative is in my view obvious -- simply give students a suite of unit tests. This raises some potential issues, such as safety and administrative convenience, but they turned out to be easy to solve. If you want to learn more about developing tests, see the test-development repository here: https://gitlab.compute.dtu.dk/tuhe/unitgrade_private
 
 ## Installation
-Unitgrade is simply installed like any other package using `pip`:
+Unitgrade requires Python 3.8 or higher, and can be installed using `pip`:
 ```terminal
 pip install unitgrade
 ```
-This will install unitgrade in your site-packages directory and you should be all set. If you want to upgrade an old version of unitgrade run:
+After the command completes you should be all set. If you want to upgrade an old version of unitgrade, run:
 ```terminal
 pip install unitgrade --upgrade --no-cache-dir
 ```
-If you are using anaconda+virtual environment you can install it as any other package:
+If you are using anaconda+virtual environment you can also install it as you would any other package:
 ```terminal
 source activate myenv
 conda install git pip
@@ -46,23 +60,22 @@ pip install unitgrade
 ```
 When you are done, you should be able to import unitgrade. Type `python` in the terminal and try:
 ```pycon
->>> import unitgrade2
+>>> import unitgrade
 ```
 
 ## Using Unitgrade
-In unitgrade, your homework assignments are called **reports** and are distributed as regular `.py`-files. I am going to use `cs101report1.py` as a generic example in the following, but a real-world example can be found here: https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/blob/master/examples/example_simplest/students/cs101/report1.py .
+Your homework assignments are called **reports** and are distributed as regular `.py`-files. In the following I will use `cs101report1.py` as an example, and you can find a real-world example here: https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/blob/master/examples/example_simplest/students/cs101/report1.py .
 
-The report is simply a collection of questions which are individually scored, and each question may in turn involve checking several sub-cases.
+A report is simply a collection of questions, and each question may in turn involve several tests.
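+
+To give a feel for the format, here is a minimal sketch of what the tests in a report file might look like. The names (`Week1`, `cs101.homework1`, `add`) are purely illustrative and not taken from an actual course; the point is that each test is an ordinary `unittest`-style test:
+```python
+from unitgrade import UTestCase
+
+class Week1(UTestCase):
+    """ Tests for the first week's homework (hypothetical example). """
+
+    def test_add(self):
+        from cs101.homework1 import add  # the function you wrote
+        self.assertEqual(add(2, 2), 4)   # compared against the instructor's result
+        self.assertEqual(add(-100, 5), -95)
+```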
-You should think of the tests as a help for you when you are debugging your code and when you are trying to figure out what to do.
-I recommend running the tests through your IDE. In pycharm, this is as simple as right-clicking on the test and selecting `Run as unittest`. The image belows shows the outcome in Pycharm:
+I recommend running the tests through your IDE. In pycharm, this is as simple as right-clicking on the test and selecting `Run as unittest`:
 
-The tests are shown in the lower-left corner, and in this case they are all green meaning they have passed. If a test fails, you can right-click and select `debug as unittest`, or you can click on it and see the output it produced, and you can right-click on individual tests to re-run them.
+The outcome of the tests is shown in the lower-left corner, and in this case they are all green, meaning they have passed. You can see the console output generated by a test by clicking on it. If a test fails, you can select `debug as unittest` from the menu above to launch a debugger, and you can right-click on individual tests to re-run them.
 
 ### Checking your score
-To check your score, you have to run the main script (`cs101report1.py`) as a regular python file. This can be done either through pycharm (Hint: Open the file and press `alt-shift-F10`) or in the console by running the command:
+To check your score, you have to run the main script (`cs101report1.py`). This can be done either through pycharm (Hint: Open the file and press `alt-shift-F10`) or in the console by running the command:
 ```
 python cs101report1.py
 ```
@@ -73,9 +86,9 @@ The file will run and show an output where the score of each question is compute
  | | | |_ __ _| |_| | \/_ __ __ _ __| | ___
  | | | | '_ \| | __| | __| '__/ _` |/ _` |/ _ \
  | |_| | | | | | |_| |_\ \ | | (_| | (_| |  __/
-  \___/|_| |_|_|\__|\____/_| \__,_|\__,_|\___| v0.1.22, started: 19/05/2022 15:16:20
+  \___/|_| |_|_|\__|\____/_| \__,_|\__,_|\___| v0.1.29.0, started: 16/09/2022 13:47:57
 
-Week 4: Looping (use --help for options)
+02531 week 5: Looping (use --help for options)
 Question 1: Cluster analysis
  * q1.1) clusterAnalysis([0.8, 0.0, 0.6]) = [1, 2, 1] ?.............................................................PASS
  * q1.2) clusterAnalysis([0.5, 0.6, 0.3, 0.3]) = [2, 2, 1, 1] ?.....................................................PASS
@@ -106,7 +119,7 @@ Question 4: Fermentation rate
  * q4.4) fermentationRate([20.1, 19.3, 1.1, 18.2, 19.7, ...], 18.2, 20) = 19.500 ?..................................PASS
  * q4) Total.................................................................................................... 10/10
 
-Total points at 15:16:20 (0 minutes, 0 seconds)....................................................................40/40
+Total points at 13:48:02 (0 minutes, 4 seconds)....................................................................40/40
 
 Provisional evaluation
 ---------  -----
 q1) Total  10/10
@@ -118,7 +131,7 @@ Total  40/40
 
 Note your results have not yet been registered.
 To register your results, please run the file:
->>> report1intro_grade.py
+>>> looping_tests_grade.py
 In the same manner as you ran this file.
 ```
 
 ```
 python cs101report1_grade.py
 ```
 This script will run *the same tests as before* and generates a file named `Report0_handin_18_of_18.token` (this is called the `token`-file because of the extension). The token-file contains all your results, and it is the token-file you should upload (and no other). Because you cannot (and most definitely should not!) edit it, it shows the number of points in the file-name.
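+
+If you are curious, the token file can also be inspected programmatically. The sketch below uses the helper `load_token` from `unitgrade.utils` (the same function the dashboard uses internally); the file name is just an example:
+```python
+from unitgrade.utils import load_token
+
+results, _ = load_token("Report0_handin_18_of_18.token")
+for qkey, question in results['details'].items():
+    for ikey, item in question['items'].items():
+        print('-'.join(ikey), item['status'])  # e.g. which tests passed/failed
+```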
+
+### The dashboard
+I recommend watching and running the tests from your IDE, as this allows you to use the debugger in conjunction with your tests. However, I have also put together a dashboard that shows you the outcome of individual tests and what is currently recorded in your `token`-file. To start the dashboard, simply run the command
+```
+unitgrade
+```
+from a directory that contains a test (the directory will be searched recursively for tests). The command will start a small background service and open this page:
+
+
+Features supported in the current version:
+ - Shows you which files need to be edited to solve the problem
+ - Collects hints given in the homework files and displays them for the relevant tests (see the example below)
+ - Fully responsive -- the UI, including the terminal, will update while the test is running, regardless of where you launch the test
+ - Allows you to re-run tests
+ - Shows the current test status and the results captured in the `.token`-file
+ - Tested on Windows/Linux
+ - No binaries or special setup required; everything is 100% python
+
+Note that the run feature currently assumes that your system-wide `python` command can run the tests. This may not be the case if you are using virtual environments -- I expect to fix this soon.
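+
+As an illustration of the hint-system, hints are written as lines in the docstrings of the functions (or tests) you work on, each hint marked by a leading `*` (this is the marker the dashboard's hint-parser looks for; the function below and the `Hints:` label are illustrative, and the exact convention may differ between course setups):
+```python
+def clusterAnalysis(reflectance):
+    """ Assign a cluster label to each reflectance measurement (see the homework PDF).
+
+    Hints:
+    * Remember the cluster labels are 1-indexed.
+    * Try to solve the problem for two clusters first.
+    """
+    ...
+```
+When the corresponding test fails, the dashboard collects these hints and shows them next to the test.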
 
 ### Why are there two scripts?
-The reason why we use a standard test script (one with the `_grade.py` extension and one without), is because the tests should both be easy to debug, but at the same time we have to avoid accidential changes to the test scripts. The tests themselves are the same, so if one script works, so should the other.
+# FAQ
+
+ - **Why are there two scripts?**
+The reason we use two test scripts (one with the `_grade.py` extension and one without) is that the tests should be easy to debug, while at the same time we must avoid accidental changes to the test scripts. The tests themselves are the same, so if one script works, so should the other.
+
 - **My non-grade script and the `_grade.py` script give a different number of points**
-Since the two scripts should contain the same code, the reason is nearly certainly that you have made an (accidental) change to the test scripts. Please ensure both scripts are up-to-date and if the problem persists, try to get support.
+Since the two scripts should contain the same code, the reason is almost certainly that you have made an (accidental) change to the test scripts. Please ensure both scripts are up-to-date and, if the problem persists, get support.
 
- - **Why is there a `unitgrade` directory with a bunch of pickle files? Should I also upload them?**
+ - **Why is there a `unitgrade`-directory with a bunch of pickle files? Should I also upload them?**
 No. These files contain the pre-computed test results your code is compared against. You should only upload the `.token` file, nothing else.
 
 - **I am worried you might think I cheated because I opened the '_grade.py' script/token file**
@@ -146,35 +182,35 @@ This should not be a concern. Both files are in a binary format (i.e., if you op
 Feel free to edit/break this file as much as you like if it helps you work out the correct solution. However, since the `report1_grade.py` script contains a separate version of the tests, please ensure both files are in sync to avoid unexpected behavior.
 
 ### Debugging your code/making the tests pass
-The course material should contain information about the intended function of the scripts used in the tests, and the file `report1.py` should mainly be used to check which of your code is being run. In other words, first make sure your code solves the exercises, and only later run the test script which is less easy/nice to read.
-However, obivously you might get to a situation where your code seems to work, but a test fails. In that case, it is worth looking into the code in `report1.py` to work out what is going on.
+The course material should contain information about the intended function of the scripts, and the file `report1.py` should mainly be used to check which parts of your code are being run. In other words, first make sure your code solves the exercises, and only later run the test script, which is less easy/nice to read.
+However, obviously you might get to a situation where your code seems to work but a test fails. In that case, it is worth looking into the code in `report1.py` to work out what exactly is going on.
 
 - **I am 99% sure my code is correct, but the test still fails. Why is that?**
 The testing framework offers a great deal of flexibility in terms of what is compared. This is either: (i) the value a function returns, (ii) what the code prints to the console, or (iii) something derived from these. When a test fails, you should always try to insert a breakpoint on exactly the line that generates the problem, run the test in the debugger, and figure out what the expected result was supposed to be. This should give you a clear hint as to what may be wrong.
 
-One possibility that might trick some is that if the test compares a value computed by your code, the datatype of that value is important. For instance, a `list` is not the same as a python `ndarray`, and a `tuple` is different from a `list`. This is the correct behavior of a test: These things are not alike and correct code should not confuse them.
+One possibility that might trick some is that if the test compares a value computed by your code, the datatype of that value may be important. For instance, a `list` is not the same as a numpy `ndarray`, and a `tuple` is different from a `list` (see the short example after this list).
 
 - **The `report1.py` class is really confusing. I can see the code it runs on my computer, but not the expected output. Why is it like this?**
 To make sure the desired output of the tests is always up to date, the tests are computed from a working version of the code and loaded from the disk rather than being hard-coded.
 
 - **How do I see the output of my programs in the tests? Or the intended output?**
 There are a number of console options available to help you figure out what your program should output and what it currently outputs. They can be found using:
 ```python report1.py --help```
 Note these are disabled for the `report1_grade.py` script to avoid confusion. It is not recommended that you use the grade script to debug your code.
 
 - **Since I cannot read the `.token` file, can I trust it contains the same number of points internally as the file name indicates?**
 Yes.
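+
+A short, self-contained illustration of the datatype pitfall mentioned above (plain Python, nothing unitgrade-specific):
+```pycon
+>>> [1, 2] == (1, 2)                   # a list is never equal to a tuple
+False
+>>> import numpy as np
+>>> type(np.asarray([1, 2])) is list   # an ndarray is not a list
+False
+```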
 
 ### Privacy/security
 - **I managed to reverse engineer the `report1_grade.py`/`*.token` files in about 30 minutes. If the safety measures are so easily broken, how do you ensure people do not cheat?**
-That the script `report1_grade.py` is difficult to read is not the principle safety measure. Instead, it ensures there is no accidential tampering. If you muck around with these files and upload the result, we will very likely know.
+That the script `report1_grade.py` is difficult to read is not the principal safety measure. Instead, it ensures there is no accidental tampering. If you muck around with these files and upload the result, we will very likely know you edited them.
 
 - **I have private data on my computer. Will this be read or uploaded?**
-No. The code will look for and upload your solutions, but it will not read/look at other directories in your computer. As long as your keep your private files out of the directory containing your homework you have nothing to worry about.
+No. The code will look for and include your solutions in the `.token`-file, but it will not read/look at other directories on your computer. As long as you keep your private files out of the directory that contains your homework, you have nothing to worry about.
 
 - **Does this code install any spyware/etc.? Does it communicate with a website/online service?**
-No. Unitgrade makes no changes outside the courseware directory and it does not do anything tricky. It reads/runs code and produce the `.token` file.
+No. Unitgrade makes no changes outside the courseware directory, and it does not do anything tricky. It reads/runs code and produces the `.token` file. The development version of unitgrade has an experimental feature to look at a github page and check that your version of the tests is up-to-date, but this is currently not enabled, and all it would do is warn you about a potential problem with an outdated test.
 
 - **I still have concerns about running code on my computer I cannot easily read**
 Please contact me and we can discuss your specific concerns.
 
 # Citing
 ```bibtex
 @online{unitgrade,
-	title={Unitgrade (0.1.22): \texttt{pip install unitgrade}},
+	title={Unitgrade (0.1.29.0): \texttt{pip install unitgrade}},
 	url={https://lab.compute.dtu.dk/tuhe/unitgrade},
-	urldate = {2022-05-19},
+	urldate = {2022-09-16},
 	month={9},
 	publisher={Technical University of Denmark (DTU)},
 	author={Tue Herlau},
 	year={2022},
 }
 ```
-
diff --git a/src/unitgrade.egg-info/requires.txt b/src/unitgrade.egg-info/requires.txt
index be7c36005674cdcb922e2a9b2f9f0653e049265d..5f3ae35b0f7dcaae454d62a8d0ba234cd019b1d4 100644
--- a/src/unitgrade.egg-info/requires.txt
+++ b/src/unitgrade.egg-info/requires.txt
@@ -1,13 +1,13 @@
-Werkzeug
-colorama
-coverage
-flask
-flask_socketio
-importnb
 numpy
-pupdb
-pyfiglet
-requests
 tabulate
+pyfiglet
+coverage
+colorama
 tqdm
+importnb
+requests
 watchdog
+flask_socketio
+flask
+Werkzeug
+diskcache
diff --git a/src/unitgrade/dashboard/app.py b/src/unitgrade/dashboard/app.py
index 1813228270c06e4319b3eaef75051152938e1544..b43da5d4be1ece1663170262881d195f173083d4 100644
--- a/src/unitgrade/dashboard/app.py
+++ b/src/unitgrade/dashboard/app.py
@@ -77,13 +77,14 @@ def mkapp(base_dir="./", use_command_line=True):
                 db2.set('coverage_files_changed', [file])
 
         elif type =="token":
-            a, b = load_token(file)
-            rs = {}
-            for k in a['details']:
-                for ikey in a['details'][k]['items']:
-                    rs['-'.join(ikey)] = a['details'][k]['items'][ikey]['status']
-            socketio.emit('token_update', {"full_path": file, 'token': os.path.basename(file),
-                                           'results': rs, 'state': 'evaluated'}, namespace="/status")
+            if file is not None:
+                a, b = load_token(file)
+                rs = {}
+                for k in a['details']:
+                    for ikey in a['details'][k]['items']:
+                        rs['-'.join(ikey)] = a['details'][k]['items'][ikey]['status']
+                socketio.emit('token_update', {"full_path": file, 'token': os.path.basename(file),
+                                               'results': rs, 'state': 'evaluated'}, namespace="/status")
         else:
             raise Exception("Bad type: " + type)
@@ -180,6 +181,8 @@ def mkapp(base_dir="./", use_command_line=True):
                     if h.strip().startswith("*"):
                         ahints.append('')
                         h = h.strip()[1:]
+                    if len(ahints) == 0:  # In case we forgot to add a *-mark in front of the first hint.
+                        ahints.append('')
                     ahints[-1] += "\n" + h
                 hints[k] = (ahints, hints[k][1], hints[k][2])
             # items[it_key_js] =
diff --git a/src/unitgrade/dashboard/dbwatcher.py b/src/unitgrade/dashboard/dbwatcher.py
index f5416b546d94bf0baeef6a7410feb567962e4e85..0162e1c1855c3dc28d5915603c69f4be5120dc69 100644
--- a/src/unitgrade/dashboard/dbwatcher.py
+++ b/src/unitgrade/dashboard/dbwatcher.py
@@ -21,7 +21,7 @@ class DBWatcher(Thread):
 
     def run(self):
-        print("A DB WATCHER INSTANCE HAS BEEN STARTED!")
+        # print("A DB WATCHER INSTANCE HAS BEEN STARTED!")
         # As long as we weren't asked to stop, try to take new tasks from the
         # queue. The tasks are taken with a blocking 'get', so no CPU
         # cycles are wasted while waiting.
diff --git a/src/unitgrade/dashboard/ephermaltransfer.py b/src/unitgrade/dashboard/ephermaltransfer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c10de12324cd657fcdfcb7108a41655c4833981b
--- /dev/null
+++ b/src/unitgrade/dashboard/ephermaltransfer.py
@@ -0,0 +1,91 @@
+import threading
+from threading import Thread
+import datetime
+import time
+
+
+class AbstractDBWatcher(Thread):
+    """ Base class: poll a diskcache database directory and call watch_function() periodically. """
+
+    def __init__(self, unitgrade_data_dir):
+        super().__init__()
+        self.stoprequest = threading.Event()
+        from diskcache import Cache
+        self.db = Cache(unitgrade_data_dir)
+
+    def watch_function(self):
+        # Subclasses override this; it is invoked once per polling interval.
+        pass
+
+    def run(self):
+        # As long as we weren't asked to stop, keep polling the database. The
+        # sleep avoids wasting CPU cycles, while stoprequest is still checked
+        # regularly so the thread can be shut down promptly.
+        while not self.stoprequest.is_set():
+            ct = datetime.datetime.now()
+            self.watch_function()
+            time.sleep(max(0.2, (datetime.datetime.now() - ct).seconds))
+
+    def join(self, timeout=None):
+        self.stoprequest.set()
+        super().join(timeout)
+
+    def close(self):
+        self.join()
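+
+# For reference, the tagged read/write pattern this watcher is built around
+# (a sketch; the key and value are illustrative). diskcache's Cache supports a
+# tag per entry, which set()/get() can write and read back:
+#
+#   from diskcache import Cache
+#   db = Cache("unitgrade_data")
+#   db.set(key=123, value=("state", "running"), tag="ephemeral")
+#   value, tag = db.get(123, tag=True)   # -> ("state", "running"), "ephemeral"
+#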
+
+class EphermalDBWatcher(AbstractDBWatcher):
+    def watch_function(self):
+        # Iterate over all keys currently in the cache and read each value
+        # together with its tag (tag=True makes get() return a (value, tag) pair).
+        for k in self.db:
+            val, tag = self.db.get(k, tag=True)
+            print(k, val, tag)
+
+
+if __name__ == '__main__':
+    import os
+    f = os.path.dirname(__file__)
+    print(f)
+    f = os.path.normpath(f + "/../../../../unitgrade_private/devel/example_devel/instructor/cs108/unitgrade_data")
+    assert os.path.isdir(f)  # sanity-check that the example data directory exists
+
+    edbw = EphermalDBWatcher(unitgrade_data_dir=f)
+    edbw.start()
+
+    import wandb
+    import numpy as np
+    print("Random id", wandb.util.generate_id())
+    wandb.init(project="unitgrade_report1", id="my_random_job", resume="allow")
+    wandb.log({'stuff': 'good', 'x': np.random.rand()})
diff --git a/src/unitgrade/dashboard/static/sidebars.css b/src/unitgrade/dashboard/static/sidebars.css
index 23f7a870b419ca15ede8fcdf98eb7d5536422af3..ba23410fca40af61a24ce3a5c6871695929639b1 100644
--- a/src/unitgrade/dashboard/static/sidebars.css
+++ b/src/unitgrade/dashboard/static/sidebars.css
@@ -37,11 +37,10 @@
     min-height: 100vh;
     border: solid rgba(0, 0, 0, .15);
     border-width: 1px 0;
     box-shadow: inset 0 .5em 1.5em rgba(0, 0, 0, .1), inset 0 .125em .5em rgba(0, 0, 0, .15);
-    padding-left: 10px;
+    padding-left: 5px;
     padding-top: 10px;
-    padding-right: 10px;
+    padding-right: 5px;
     padding-bottom: 10px;
-    /* padding: 0px; */
 }
diff --git a/src/unitgrade/dashboard/templates/index3.html b/src/unitgrade/dashboard/templates/index3.html
index 7497949ae6df5dee52dd2b8f3f0351e9f1db871f..4cdb2df8359f319970dd6295a6994e2d03560e1a 100644
--- a/src/unitgrade/dashboard/templates/index3.html
+++ b/src/unitgrade/dashboard/templates/index3.html
@@ -13,13 +13,13 @@ var terminals = {};
 <script>terminals["{{ikey}}"] = null; </script>
 <div class="tab-pane fade {{ 'show active' if outer_loop.index == 1 and loop.index == 1 else ''}}" id="{{ikey}}-pane" role="tabpanel" aria-labelledby="{{ikey}}-pane-tab">
 <!-- begin tab card -->
-<h1>{{qbody.title}}</h1>
-<h4>
+<h3>{{qbody.title}}</h3>
+<h5>
     <span class="{{ikey}}-status">
     <span id="{{ikey}}-status"><i id="{{ikey}}-icon" class="bi bi-emoji-neutral"></i> <span class="text-left">{{ibody.title}}</span></span>
     </span>
-    {% if ibody.runable %}<a onclick="re_run_test('{{ikey}}');" type="button" class="btn btn-primary">Rerun</a>{% endif %}
-</h4>
+    {% if ibody.runable %}<a onclick="re_run_test('{{ikey}}');" type="button" class="btn btn-primary btn-sm">Rerun</a>{% endif %}
+</h5>
 
 <div class="card shadow mb-3 bg-white rounded">
     <div class="card-header">
@@ -158,8 +158,8 @@ This will generate a <code>.token</code> file which contains your answers and wh
     <ul class="list-unstyled ps-0">
         {% for qkey, qbody in questions.items() %}
         {% set outer_loop = loop %}
-        <li class="mb-1">
-            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="#{{qkey}}-collapse" aria-expanded="true" >
+        <li class="mb-1 text-truncate">
+            <button class="btn btn-toggle align-items-center rounded collapsed btn-sm" data-bs-toggle="collapse" data-bs-target="#{{qkey}}-collapse" aria-expanded="true" >
                 {{qbody.title}} </button>
             <div class="collapse show" id="{{qkey}}-collapse">
                 <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small" id="myTab">
@@ -171,7 +171,7 @@ This will generate a <code>.token</code> file which contains your answers and wh
                     <div class="col col-lg-11 text-truncate" style="background-color: white;">
                         <button class="btn rounded collapsed nav-link {{ 'active' if outer_loop.index == 1 and loop.index == 1 else ''}} text-left" style="width: 100%;" id="{{ikey}}-pane-tab" data-bs-toggle="pill" data-bs-target="#{{ikey}}-pane" type="button" role="tab"
aria-controls="{{ikey}}-pane" aria-selected="false" data-toggle="tab"> <span class="{{ikey}}-status"> - <span id="{{ikey}}-status"><i style="height: 16px;width: 16px;" id="{{ikey}}-icon" class="bi bi-emoji-neutral"></i> <span class="text-left">{{ibody.title}}</span></span> + <span id="{{ikey}}-status"><i style="height: 16px; width: 16px;" id="{{ikey}}-icon" class="bi bi-emoji-neutral"></i> <span class="text-left">{{ibody.title}}</span></span> </span> </button> </div> diff --git a/src/unitgrade/dashboard/wandb/latest-run b/src/unitgrade/dashboard/wandb/latest-run new file mode 120000 index 0000000000000000000000000000000000000000..3b62c7192e8cb6fe0b85d63b48662ff2984fba42 --- /dev/null +++ b/src/unitgrade/dashboard/wandb/latest-run @@ -0,0 +1 @@ +run-20221018_195338-my_random_job \ No newline at end of file diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/config.yaml b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..13eacb6ce73655ed1c38349d77566e608abb6ccb --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/config.yaml @@ -0,0 +1,22 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.13.3 + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.10.6 + start_time: 1666112255.230647 + t: + 1: + - 55 + 2: + - 55 + 3: + - 14 + - 23 + 4: 3.10.6 + 5: 0.13.3 + 8: + - 5 diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/requirements.txt b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..080571eaecba26f64352311a286781804b31badb --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/requirements.txt @@ -0,0 +1,319 @@ +-eamer-slider==0.1.23 +absl-py==1.2.0 +aiosignal==1.2.0 +anyio==3.6.1 +appdirs==1.4.4 +apt-xapian-index==0.49 +argon2-cffi==21.1.0 +asgiref==3.5.2 +async-generator==1.10 +async-timeout==4.0.2 +attrs==21.2.0 +auth-code-flow==0.2.1 +babel==2.8.0 +backcall==0.2.0 +bcrypt==4.0.0 +beamer-slider==0.1.25.4 +beautifulsoup4==4.10.0 +beniget==0.4.1 +bidict==0.22.0 +bleach==4.1.0 +blinker==1.4 +brotli==1.0.9 +build==0.8.0 +cached-property==1.5.2 +cachetools==5.2.0 +certifi==2022.6.15 +cffi==1.15.1 +chardet==4.0.0 +click==8.0.4 +clize==4.2.1 +cloudpickle==2.2.0 +codesnipper==0.1.13 +colorama==0.4.4 +command-not-found==0.3 +commonmark==0.9.1 +coursebox==0.1.8 +coverage==6.4.4 +cryptography==3.4.8 +cssselect==1.1.0 +cupshelpers==1.0 +cvxpy==1.2.1 +cycler==0.11.0 +dateparser==1.1.1 +dbus-python==1.2.18 +decorator==4.4.2 +defer==1.0.6 +defusedxml==0.7.1 +deprecated==1.2.13 +diskcache==5.4.0 +distlib==0.3.4 +distro-info==1.1build1 +distro==1.7.0 +django-admin-views==0.8.0 +django-jquery==3.1.0 +django-tables2==2.4.1 +django-tinymce==3.5.0 +django==4.1 +dm-tree==0.1.7 +docker-compose==1.29.2 +docker-pycreds==0.4.0 +docker==5.0.3 +dockerpty==0.4.1 +docopt==0.6.2 +docutils==0.17.1 +dtufarm==0.0.13 +ecos==2.0.10 +entrypoints==0.4 +et-xmlfile==1.1.0 +exceptiongroup==1.0.0rc9 +fabric==2.7.1 +fastapi==0.85.0 +filelock==3.6.0 +flask-sock==0.5.2 +flask-socketio==5.3.0 +flask==2.2.2 +fonttools==4.29.1 +frozenlist==1.3.1 +fs==2.4.12 +gast==0.5.2 +gitdb==4.0.9 +gitpython==3.1.27 +google-auth-oauthlib==0.4.6 +google-auth==2.12.0 +gpg==1.16.0-unknown +grpcio==1.43.0 +gunicorn==20.1.0 +gym-notices==0.0.8 +gym==0.21.0 +h11==0.13.0 
+html-testrunner==1.2.1 +html5lib==1.1 +httplib2==0.20.2 +idna==3.3 +imageio==2.22.0 +importlib-metadata==4.6.4 +importnb==0.7.0 +iniconfig==1.1.1 +invoke==1.7.1 +ipykernel==6.7.0 +ipython-genutils==0.2.0 +ipython==7.31.1 +ipywidgets==6.0.0 +itsdangerous==2.1.2 +jedi==0.18.0 +jeepney==0.7.1 +jinja2==3.1.2 +joblib==1.2.0 +jsonschema==3.2.0 +jupyter-client==7.1.2 +jupyter-core==4.9.1 +jupyterlab-pygments==0.1.2 +kazam==1.4.5 +keyring==23.5.0 +kiwisolver==1.3.2 +langdetect==1.0.9 +language-selector==0.1 +latexcodec==2.0.1 +launchpadlib==1.10.16 +layeredconfig==0.3.2 +lazr.restfulclient==0.14.4 +lazr.uri==1.0.6 +line-profiler-pycharm==1.1.0 +line-profiler==3.5.1 +lxml==4.8.0 +lz4==3.1.3+dfsg +markdown==3.4.1 +markupsafe==2.1.1 +matplotlib-inline==0.1.3 +matplotlib==3.5.1 +mazebase==0.0.1 +mistune==0.8.4 +more-itertools==8.10.0 +mosspy==1.0.9 +mpmath==0.0.0 +msgpack==1.0.4 +nbclient==0.5.6 +nbconvert==6.4.0 +nbformat==5.1.3 +nest-asyncio==1.5.4 +netifaces==0.11.0 +networkx==2.8.6 +notebook==6.4.8 +numpy==1.21.5 +oauthlib==3.2.0 +od==2.0.2 +olefile==0.46 +openpyxl==3.0.10 +osqp==0.6.2.post5 +outcome==1.2.0 +packaging==21.3 +pandas==1.4.3 +pandocfilters==1.5.0 +paramiko==2.11.0 +parso==0.8.1 +patchwork==1.0.1 +pathlib2==2.3.7.post1 +pathtools==0.1.2 +pbr==5.10.0 +pep517==0.13.0 +pettingzoo==1.20.1 +pexpect==4.8.0 +pickleshare==0.7.5 +pillow==9.0.1 +pip==22.0.2 +pkginfo==1.8.3 +platformdirs==2.5.1 +pluggy==0.13.0 +ply==3.11 +prometheus-client==0.9.0 +promise==2.3 +prompt-toolkit==3.0.28 +protobuf==3.19.5 +psutil==5.9.2 +psycopg2-binary==2.9.3 +ptyprocess==0.7.0 +pupdb==0.1.4 +py==1.10.0 +pyasn1-modules==0.2.8 +pyasn1==0.4.8 +pybtex==0.24.0 +pycairo==1.20.1 +pycode-similar==1.4 +pycparser==2.21 +pycups==2.0.1 +pydantic==1.10.2 +pyee==8.2.2 +pyfiglet==0.8.post1 +pygame==2.1.2 +pyglet==1.5.26 +pygments==2.11.2 +pygobject==3.42.1 +pyjwt==2.3.0 +pylatexenc==2.10 +pyminifier==2.1 +pynacl==1.5.0 +pyparsing==2.4.7 +pypdf2==2.10.8 +pyppeteer==1.0.2 +pyqt5-sip==12.9.1 +pyqt5==5.15.6 +pyrsistent==0.18.1 +pysocks==1.7.1 +pytest-html-reporter==0.2.9 +pytest==7.1.2 +python-apt==2.3.0+ubuntu2.1 +python-dateutil==2.8.1 +python-debian==0.1.43ubuntu1 +python-dotenv==0.19.2 +python-engineio==4.3.4 +python-socketio==5.7.1 +pythran==0.10.0 +pytz-deprecation-shim==0.1.0.post0 +pytz==2022.2.1 +pywavelets==1.4.1 +pyxdg==0.27 +pyyaml==5.4.1 +pyzmq==22.3.0 +qdldl==0.1.5.post2 +rauth==0.7.3 +ray==2.0.0 +readme-renderer==37.1 +redis==4.3.4 +regex==2022.3.2 +reportlab==3.6.8 +requests-oauthlib==1.3.1 +requests-toolbelt==0.9.1 +requests-unixsocket==0.2.0 +requests==2.25.1 +rfc3986==2.0.0 +rich==12.5.1 +roman==3.3 +rq==1.11.0 +rsa==4.9 +ruamel.yaml.clib==0.2.6 +ruamel.yaml==0.17.21 +scikit-image==0.19.3 +scikit-learn==1.1.2 +scipy==1.8.0 +scour==0.38.2 +scs==3.2.0 +seaborn==0.12.0 +secretstorage==3.3.1 +selenium==4.5.0 +send2trash==1.8.1b0 +sentry-sdk==1.9.9 +setproctitle==1.3.2 +setuptools==57.0.0 +sh==1.14.3 +shortuuid==1.0.9 +sigtools==4.0.0 +simple-websocket==0.8.0 +six==1.16.0 +sklearn==0.0 +smmap==5.0.0 +sniffio==1.2.0 +sortedcontainers==2.4.0 +soupsieve==2.3.1 +splinter==0.18.1 +sqlparse==0.4.2 +sse-starlette==1.1.6 +starlette==0.20.4 +stevedore==4.0.0 +sympy==1.9 +systemd-python==234 +tabulate==0.8.10 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.10.1 +terminado==0.13.1 +testpath==0.5.0 +texttable==1.6.4 +threadpoolctl==3.1.0 +tifffile==2022.8.12 +tika==1.24 +tinydb==4.7.0 +toml==0.10.2 +tomli==2.0.1 +torch==1.12.1 +tornado==6.1 +tox==3.21.4 +tqdm==4.64.0 +traitlets==5.1.1 
+trio-websocket==0.9.2 +trio==0.21.0 +turbo-flask==0.8.0 +twine==4.0.1 +typing-extensions==4.3.0 +tzdata==2022.2 +tzlocal==4.2 +ubuntu-advantage-tools==27.11.2 +ubuntu-drivers-common==0.0.0 +ufolib2==0.13.1 +ufw==0.36.1 +unattended-upgrades==0.1 +unicodedata2==14.0.0 +unitgrade-devel==0.1.44 +unitgrade==0.1.30.1 +urllib3==1.26.12 +urlparser==0.1.2 +usb-creator==0.3.7 +virtualenv==20.13.0+ds +wadllib==1.3.6 +wandb==0.13.3 +watchdog==2.1.9 +wcwidth==0.2.5 +webdriver-manager==3.8.3 +webencodings==0.5.1 +websocket-client==1.2.3 +websockets==10.3 +werkzeug==2.2.2 +wexpect==2.3.2 +wheel==0.37.1 +widgetsnbextension==2.0.0 +wrapt==1.14.1 +wsproto==1.2.0 +xdg==5 +xkit==0.0.0 +xlwings==0.27.14 +zipp==1.0.0 \ No newline at end of file diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-metadata.json b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..07fe1014dd03f8518e37b16d497052d1423cb4bd --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-metadata.json @@ -0,0 +1,22 @@ +{ + "os": "Linux-5.15.0-48-generic-x86_64-with-glibc2.35", + "python": "3.10.6", + "heartbeatAt": "2022-10-18T16:57:37.287246", + "startedAt": "2022-10-18T16:57:35.204392", + "docker": null, + "cpu_count": 4, + "cuda": null, + "args": [], + "state": "running", + "program": "/home/tuhe/Documents/unitgrade/src/unitgrade/dashboard/ephermaltransfer.py", + "codePath": "src/unitgrade/dashboard/ephermaltransfer.py", + "git": { + "remote": "https://lab.compute.dtu.dk/tuhe/unitgrade.git", + "commit": "d1e994c820e51a57a1244a22e60122aaf0058428" + }, + "email": "tuhe@dtu.dk\\n", + "root": "/home/tuhe/Documents/unitgrade", + "host": "thp", + "username": "tuhe", + "executable": "/usr/bin/python3.10" +} diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-summary.json b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..d06bf966b418b520ddbfe4a68b58a4da722f4640 --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/files/wandb-summary.json @@ -0,0 +1 @@ +{"stuff": "good", "x": 0.3957734207619399, "_timestamp": 1666112257.3683903, "_runtime": 2.1377432346343994, "_step": 0, "_wandb": {"runtime": 0}} \ No newline at end of file diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/run-my_random_job.wandb b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/run-my_random_job.wandb new file mode 100644 index 0000000000000000000000000000000000000000..c4ccbf945299d1cdba839d04b39d39b0a9807038 Binary files /dev/null and b/src/unitgrade/dashboard/wandb/run-20221018_185735-my_random_job/run-my_random_job.wandb differ diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/config.yaml b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1040de4ef7c4bcbff5202f7fe2a239b63bc51b6a --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/config.yaml @@ -0,0 +1,23 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.13.3 + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.10.6 + start_time: 1666112376.196648 + t: + 1: + - 55 + 2: + - 55 + 3: + - 5 + - 14 + - 23 + 4: 3.10.6 + 5: 
0.13.3 + 8: + - 5 diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/wandb-summary.json b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..11538a4e418943895e5e25e9f40f6ca56845a7f3 --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/files/wandb-summary.json @@ -0,0 +1 @@ +{"stuff": "good", "_runtime": 359.81810307502747, "_timestamp": 1666112733.353167, "x": 0.8731987630075321, "_step": 5} \ No newline at end of file diff --git a/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/run-my_random_job.wandb b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/run-my_random_job.wandb new file mode 100644 index 0000000000000000000000000000000000000000..b4d6aabbde936d780d01ede01a21f06b863143f4 Binary files /dev/null and b/src/unitgrade/dashboard/wandb/run-20221018_185936-my_random_job/run-my_random_job.wandb differ diff --git a/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/config.yaml b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14756f28965d5524d36a2a28aec7929ee911697b --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/config.yaml @@ -0,0 +1,23 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.13.3 + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.10.6 + start_time: 1666112803.606044 + t: + 1: + - 55 + 2: + - 55 + 3: + - 5 + - 14 + - 23 + 4: 3.10.6 + 5: 0.13.3 + 8: + - 5 diff --git a/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/wandb-summary.json b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e498e10f83c633dbb43a215059d3e6873206fb53 --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/files/wandb-summary.json @@ -0,0 +1 @@ +{"_step": 2, "stuff": "good", "_runtime": 66.91523838043213, "_timestamp": 1666112807.7102654, "x": 0.6982837655905743, "_wandb": {"runtime": 0}} \ No newline at end of file diff --git a/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/run-my_random_job.wandb b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/run-my_random_job.wandb new file mode 100644 index 0000000000000000000000000000000000000000..4712d6d82fa4e3bdc7bd1f7a6adfaf45a91d0a22 Binary files /dev/null and b/src/unitgrade/dashboard/wandb/run-20221018_190643-my_random_job/run-my_random_job.wandb differ diff --git a/src/unitgrade/dashboard/wandb/run-20221018_195338-my_random_job/files/config.yaml b/src/unitgrade/dashboard/wandb/run-20221018_195338-my_random_job/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3dba31d29b0947a13b3799b86e6c84ca55c5059 --- /dev/null +++ b/src/unitgrade/dashboard/wandb/run-20221018_195338-my_random_job/files/config.yaml @@ -0,0 +1,20 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.13.3 + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.10.6 + start_time: 1666115618.482377 + t: + 1: + - 55 + 3: + - 14 + - 23 + 4: 3.10.6 + 5: 0.13.3 + 8: + - 5 diff --git a/src/unitgrade/dashboard/wandb/run-20221018_195338-my_random_job/run-my_random_job.wandb 
b/src/unitgrade/dashboard/wandb/run-20221018_195338-my_random_job/run-my_random_job.wandb
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/unitgrade/evaluate.py b/src/unitgrade/evaluate.py
index 1ad21fef2fd529a7bef8c192e000eedeb03eebe3..728577eb4c75bec870dd33ea02606d7d0bc13845 100644
--- a/src/unitgrade/evaluate.py
+++ b/src/unitgrade/evaluate.py
@@ -178,8 +178,9 @@ def evaluate_report(report, question=None, qitem=None, passall=False, verbose=Fa
         else:
             raise Exception("Status not known.")
 
-        nice_title = s.title
-        detail = {**detail, **msg, 'nice_title': nice_title}#['message'] = msg
+        # s can be an '_ErrorHolder' object (e.g. when class-level setup fails), which has no title.
+        nice_title = s.title if hasattr(s, 'title') else str(s)
+        detail = {**detail, **msg, 'nice_title': nice_title}  # ['message'] = msg
         details[key] = detail
 
         # q_[s._testMethodName] = ("pass", None)
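For background on the guard above: when a class-level fixture such as `setUpClass` raises, `unittest` reports the failure through a `unittest.suite._ErrorHolder` instead of a test instance, so attributes unitgrade defines on its tests (such as `title`) are missing. A minimal, standard-library-only sketch that reproduces the situation:
```python
import unittest

class Broken(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        raise RuntimeError("fixture failed")

    def test_x(self):
        pass

res = unittest.TestResult()
unittest.defaultTestLoader.loadTestsFromTestCase(Broken).run(res)
# The "test" recorded for the error is an _ErrorHolder, not a Broken instance:
print(type(res.errors[0][0]))   # <class 'unittest.suite._ErrorHolder'>
```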
+ # # print(result_._excinfo[0]) actual_errors = [] for test, err in self._error_fed_during_run: @@ -364,17 +393,16 @@ class UTestCase(unittest.TestCase): continue else: import traceback - # traceback.print_tb(err[2]) actual_errors.append(err) if len(actual_errors) > 0: ex, exi, tb = actual_errors[0] exi.__traceback__ = tb dbt = DebugTraceback(exi) + sys.stderr.write(dbt.render_traceback_text()) html = dbt.render_traceback_html(include_title="hello world") db.set('wz_stacktrace', html) - # db.set('state', 'fail') state_ = "fail" else: state_ = "pass" @@ -400,11 +428,12 @@ class UTestCase(unittest.TestCase): self.cov.start() self.setUp() + def _callTearDown(self): self.tearDown() - # print("Teardown.") + # print("TEaring down.") if self._with_coverage: - # print("with cov") + # print("TEaring down with coverage") from pathlib import Path from snipper import snipper_main try: @@ -415,17 +444,16 @@ class UTestCase(unittest.TestCase): data = self.cov.get_data() base, _, _ = self._report._import_base_relative() for file in data.measured_files(): + print(file) file = os.path.normpath(file) root = Path(base) child = Path(file) if root in child.parents: - # print("Reading file", child) with open(child, 'r') as f: s = f.read() lines = s.splitlines() garb = 'GARBAGE' lines2 = snipper_main.censor_code(lines, keep=True) - # print("\n".join(lines2)) if len(lines) != len(lines2): for k in range(len(lines)): print(k, ">", lines[k], "::::::::", lines2[k]) @@ -436,9 +464,11 @@ class UTestCase(unittest.TestCase): assert len(lines) == len(lines2) for ll in data.contexts_by_lineno(file): + l = ll-1 + print(l, lines2[l]) if l < len(lines2) and lines2[l].strip() == garb: - # print("Got a hit at l", l) + print("Got one.") rel = os.path.relpath(child, root) cc = self._covcache j = 0 @@ -452,7 +482,8 @@ class UTestCase(unittest.TestCase): if rel not in cc: cc[rel] = {} cc[rel][fun] = (l, "\n".join(comments)) - # print("found", rel, fun) + print("found", rel, fun) + # print(file, ll) self._cache_put((self.cache_id(), 'coverage'), self._covcache) def shortDescriptionStandard(self): @@ -660,7 +691,7 @@ class UTestCase(unittest.TestCase): return file def _artifact_file(self): - """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file. + """ File for the artifacts DB (thread safe). This file is optinal. Note the file is shared between all sub-questions. """ return os.path.join(os.path.dirname(self.__class__._cache_file()), '-'.join(self.cache_id()) + ".json") diff --git a/src/unitgrade/utils.py b/src/unitgrade/utils.py index 3a512252a7135bb09bcfc6bc2970e13734bbcbe9..bc0650e199b564c241e8e2ba27ca6740fa2ce0fe 100644 --- a/src/unitgrade/utils.py +++ b/src/unitgrade/utils.py @@ -305,13 +305,14 @@ def load_token(file_in): ## Key/value store related. class DKPupDB: """ This key/value store store artifacts (associated with a specific question) in a dictionary. """ - def __init__(self, artifact_file, use_pupdb=False): + def __init__(self, artifact_file, use_pupdb=False, register_ephemeral=False): # Make a double-headed disk cache thingy. self.dk = Cache(os.path.dirname(artifact_file)) # Start in this directory. 
diff --git a/src/unitgrade/utils.py b/src/unitgrade/utils.py
index 3a512252a7135bb09bcfc6bc2970e13734bbcbe9..bc0650e199b564c241e8e2ba27ca6740fa2ce0fe 100644
--- a/src/unitgrade/utils.py
+++ b/src/unitgrade/utils.py
@@ -305,13 +305,14 @@ def load_token(file_in):
 
 ## Key/value store related.
 class DKPupDB:
     """ This key/value store stores artifacts (associated with a specific question) in a dictionary. """
-    def __init__(self, artifact_file, use_pupdb=False):
+    def __init__(self, artifact_file, use_pupdb=False, register_ephemeral=False):
         # Make a double-headed disk cache thingy.
         self.dk = Cache(os.path.dirname(artifact_file))  # Start in this directory.
         self.name_ = os.path.basename(artifact_file[:-5])
         if self.name_ not in self.dk:
             self.dk[self.name_] = dict()
         self.use_pupdb = use_pupdb
+        self.register_ephemeral = register_ephemeral
         if self.use_pupdb:
             from pupdb.core import PupDB
             self.db_ = PupDB(artifact_file)
@@ -323,6 +324,7 @@ class DKPupDB:
         d = self.dk[self.name_]
         d[key] = value
         self.dk[self.name_] = d
+        if self.register_ephemeral:  # Mirror the write as a tagged entry, so a watcher can stream it.
+            self.dk.set(key=np.random.randint(0, high=1e8), value=(key, value), tag="ephemeral")
         self.dk[self.name_ + "-updated"] = True
 
     def __getitem__(self, item):
diff --git a/src/unitgrade/version.py b/src/unitgrade/version.py
index 239dc851b9a672d388c10e91905f79f3a611e5e4..91eab43867c2bb3f94f948ef1211c741c1e26d58 100644
--- a/src/unitgrade/version.py
+++ b/src/unitgrade/version.py
@@ -1 +1 @@
-__version__ = "0.1.30.0"
\ No newline at end of file
+__version__ = "0.1.30.2"
\ No newline at end of file