diff --git a/src/unitgrade_private/deployment.py b/src/unitgrade_private/deployment.py
index 90d365e87ac0a08a342df56a9792afaa7855dbab..e228e7dc9f1506b0e5e00c799cebc316cdefad6c 100644
--- a/src/unitgrade_private/deployment.py
+++ b/src/unitgrade_private/deployment.py
@@ -4,6 +4,38 @@
 import os
 import importlib
 import snipper
+# import re, inspect
+#
+# FUNC_BODY = re.compile('^(?P<tabs>[\t ]+)?def (?P<name>[a-zA-Z0-9_]+)([^\n]+)\n(?P<body>(^([\t ]+)?([^\n]+)\n)+)', re.M)
+#
+# class Source(object):
+#     @staticmethod
+#     def investigate(focus: object, strfocus: str) -> str:
+#         with open(inspect.getsourcefile(focus), 'r') as f:
+#             for m in FUNC_BODY.finditer(f.read()):
+#                 if m.group('name') == strfocus:
+#                     tabs = m.group('tabs') if not m.group('tabs') is None else ''
+#                     return f"{tabs}'''\n{m.group('body')}{tabs}'''"
+#
+#
+# def decorator(func):
+#     def inner():
+#         print("I'm decorated")
+#         func()
+#
+#     return inner
+#
+#
+# @decorator
+# def test():
+#     a = 5
+#     b = 6
+#     return a + b
+#
+#
+# print(Source.investigate(test, 'test'))
+
+
 def remove_hidden_methods(ReportClass, outfile=None):
     # Given a ReportClass, clean out all @hidden tests from the imports of that class.
     file = ReportClass()._file()
@@ -12,14 +44,24 @@
 
     lines_to_rem = []
 
+    for l in source:
+        if l.strip().startswith("@hide"):
+            print(l)
+
+
     for Q,_ in ReportClass.questions:
         ls = list(methodsWithDecorator(Q, hide))
         # print("hide decorateed is", ls)
         for f in ls:
+            assert inspect.getsourcefile(f) == file, "You must apply the @hide decorator as the inner-most decorator, i.e., just above the function you wish to remove."
+
             s, start = inspect.getsourcelines(f)
             end = len(s) + start
             lines_to_rem += list(range(start-1, end-1))
 
+    print("All hidden funcs")
+    print(ls)
+
     source = list([l for k, l in enumerate(source) if k not in lines_to_rem])
     source = "\n".join(source)
@@ -31,9 +73,12 @@
 
     # Allows us to use the !b;silent tags in the code. This is a bit hacky, but allows timeouts, etc. to make certain tests more robust
     from snipper.fix_bf import fix_b
-    lines, _, _ = fix_b(source.splitlines())
-    source = "\n".join(lines)
+    from snipper.snipper_main import fix_tags
+    from snipper.snip_dir import snip_dir
+    lines, _, _ = fix_b(fix_tags(source.rstrip().splitlines()))
+    source = "\n".join(lines)
+    # print(source)
     with open(os.path.dirname(file) + "/" + outfile, 'w') as f:
         f.write(source)
 
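
Context for the deployment.py changes above (illustrative note, not part of the patch): remove_hidden_methods() rewrites a report's source file so that tests marked @hide are stripped from the student copy, and the new assert enforces that @hide is the inner-most decorator so inspect.getsourcelines() sees the test function itself rather than a wrapper. A minimal usage sketch; the Report/UTestCase API comes from unitgrade, while the import location of the @hide decorator is an assumption and may need adjusting:

    from unitgrade import Report, UTestCase
    from unitgrade_private import hide  # assumed export; adjust to where @hide actually lives
    from unitgrade_private.deployment import remove_hidden_methods

    class Week1(UTestCase):
        def test_public(self):
            self.assertEqual(2 + 2, 4)

        @hide  # inner-most decorator, as the new assert requires
        def test_secret(self):
            self.assertEqual(6 * 7, 42)

    class Report1(Report):
        title = "Example report"
        questions = [(Week1, 10)]

    # Writes a copy of this file with test_secret removed.
    remove_hidden_methods(Report1, outfile="report1_student.py")
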
""" for fid in glob.glob(stage2_dir + "/*"): + if "s234792" in fid: + print(fid) + # print(fid) id, type = os.path.basename(fid).split("-") s3dir = f"{stage3_dir}/{os.path.basename(fid)}" @@ -361,10 +364,11 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad if len(fn) == 0: print("I was unable to locate", g) print("Bad?") - os.path.relpath(fn[0], student_handout_folder) - - dst = s3dir + "/"+os.path.dirname(grade_script_relative) + "/"+ os.path.basename(g) - dst = s3dir + "/" + os.path.relpath(fn[0], student_handout_folder) + # os.path.relpath(fn[0], student_handout_folder) + dst = os.path.relpath(g, fid) # Take it relative to the currnet directory. + else: + # dst = s3dir + "/"+os.path.dirname(grade_script_relative) + "/"+ os.path.basename(g) + dst = s3dir + "/" + os.path.relpath(fn[0], student_handout_folder) if os.path.isfile(dst): shutil.copy(g, dst) @@ -417,6 +421,8 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad conf = configuration.get('stage3', {}) for fid in glob.glob(stage3_dir + "/*"): + # if "s234792" in fid: + # print(fid) s4dir = f"{stage4_dir}/{os.path.basename(fid)}" grade_script_relative = get_grade_script_location(instructor_grade_script) grade_script_destination = os.path.dirname(fid + "/" + grade_script_relative) + "/" + os.path.basename(instructor_grade_script) @@ -441,6 +447,7 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad elif len(products) == 1: rc = load_token(products[0])[0] + if len(products) == 0: # No .token file has actually been generated. So obviously we have to re-generate it. RERUN_TOKEN = True elif len(student_token_file) > 0 and id not in configuration.get('stage2', {}).get('skip_students', []): @@ -459,6 +466,11 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad print("no sources") ptoken = load_token(products[0])[0] + + rename_map = conf.get('rename_items', {}) # Why give them a single test when I can sit on my ass and give them incompatible tests, WCGW? + for q in stoken['details']: + stoken['details'][q]['items'] = {rename_map.get(k, k): v for k, v in stoken['details'][q]['items'].items()} + if ".".join(stoken['sources'][0]['report_module_specification']).lower().replace(" ", "") == ".".join(ptoken['sources'][0]['report_module_specification']).replace("_tests_complete", "").lower(): # s_better_than_i, _ = determine_token_difference(stoken, rc) acceptable_broken = False @@ -469,16 +481,26 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad else: print(".".join(stoken['sources'][0]['report_module_specification']).lower()) print(".".join(rc['sources'][0]['report_module_specification']).replace("_tests_complete", "").lower()) - - raise Exception("Bad student token. Add id incompatible token names " + str(student_token_file) ) - pass + messages['stage3'].append(f"{id}> Bad student token. Add id incompatible token names ['stage3']['accept_incompatible_token_names']. This likely occured because the student renamed the grade script. " + str(student_token_file)) + RERUN_TOKEN = True # Not hat it really helps. + acceptable_broken = True if len(s_better_than_i) > 0: for q in s_better_than_i: for item in s_better_than_i[q]['items']: + if item == ('Week06SentimentAnalysis', 'test_sentiment_analysis'): + print("Yes we were better but it had to do with idiotic sentiment analysis thanks a fuck...") + continue messages['stage3'].append(f"{id}> ERROR: Student strictly better than instructor. q{q}. 
@@ -469,16 +481,26 @@
             else:
                 print(".".join(stoken['sources'][0]['report_module_specification']).lower())
                 print(".".join(rc['sources'][0]['report_module_specification']).replace("_tests_complete", "").lower())
-
-                raise Exception("Bad student token. Add id incompatible token names " + str(student_token_file) )
-                pass
+                messages['stage3'].append(f"{id}> Bad student token. Add the id to ['stage3']['accept_incompatible_token_names']. This likely occurred because the student renamed the grade script. " + str(student_token_file))
+                RERUN_TOKEN = True  # Not that it really helps.
+                acceptable_broken = True
 
             if len(s_better_than_i) > 0:
                 for q in s_better_than_i:
                     for item in s_better_than_i[q]['items']:
+                        if item == ('Week06SentimentAnalysis', 'test_sentiment_analysis'):
+                            print("Student token was better, but only on the known-problematic sentiment analysis test; skipping.")
+                            continue
                         messages['stage3'].append(f"{id}> ERROR: Student strictly better than instructor. q{q}. \n item: {item}")
                         RERUN_TOKEN = True
+                # for q in stoken['details']:
+                #     print(stoken['details'][q]['name'], ptoken['details'][q]['name'] )
+                #
+                # print(stoken['details'][5] )
+                # print( ptoken['details'][5] )
+
+
             rch = token_gather_hidden(rc)
 
             for q in stoken['details']:
@@ -494,9 +516,12 @@
                     if item not in rch['details'][q]['items']:
                         print( rch['details'][q]['items'].keys() )
 
+
+                    # print(rch['details'][q]['items'].keys())
+
                     iitems = rch['details'][q]['items'][item]
 
-                    if sitem['status'] == 'pass' and not all([i['status'] == 'pass' for i in iitems]) and id not in conf.get('verified_problematic_items', {}).get(item, []):
+                    if sitem['status'] == 'pass' and not all([i['status'] == 'pass' for i in iitems]) and id not in conf.get('verified_problematic_items', {}).get(item, []) and not conf.get("accept_public_ok_hidden_failed", False):
                         # print('disagreement found.')
                         iitems = rch['details'][q]['items'][item]
                         fails = [i['nice_title'] for i in iitems if i['status'] != 'pass']
@@ -530,9 +555,9 @@
             # a '234
             import filecmp
-            if not filecmp.cmp(instructor_grade_script, grade_script_destination, shallow=False):
+            if not filecmp.cmp(instructor_grade_script, grade_script_destination, shallow=False) and not conf.get("forgive_changed_grade_script", False):
                 print("grade script has been updated subsequently. Rerunning the tests...")
-                messages['stage3'].append(f"Rerunning token bc. of new grade script {grade_script_destination}")
+                messages['stage3'].append(f"{id}> Rerunning token because of new grade script {grade_script_destination}")
                 RERUN_TOKEN = True
             else:
                 continue
@@ -571,6 +596,8 @@
             a = 234
             from unitgrade.utils import Capturing2, Capturing, Logger
+            # from spb.defaults import *  # spb / defaults.py
+
             if unmute:  # This is a pretty big mess.
                 from unitgrade_private.run import run
                 out = run(fcom, print_output=True, log_output=False, check=False)
@@ -650,24 +677,26 @@
             if len(p_best) > 0:
                 for q in p_best.values():
                     for item in q['items']:
-                        messages['report'].append(f"{id}> Evaluation of student code was better than the token file evaluation. " + str(item) ) # + " student stderr: \n" + str(q['items'][item]['a']['stderr']) + "\n instructor stderr: \n" + str(q['items'][item]['b']['stderr'])
+                        if not configuration.get("stage_report", {}).get("accept_student_code_better_than_token", False):
+                            messages['report'].append(f"{id}> Evaluation of student code (i.e. .py handins) was better than the token file evaluation. " + str(item) ) # + " student stderr: \n" + str(q['items'][item]['a']['stderr']) + "\n instructor stderr: \n" + str(q['items'][item]['b']['stderr'])
         elif 'token' in found_students[id] and 'python' not in found_students[id]:
             pass
         elif 'token' not in found_students[id] and 'python' in found_students[id]:
             if id not in configuration.get('stage_report', {}).get("python_handin_checked", []):
-                print("="*50)
-                s = f"{id}> only handed in the .py files and not the .token files. " + str(found_students[id]['python'] + " to skip this mesage, alter the stage_report['python_handin_checked'] field. \n")
-                messages['report'].append(s)
-                stoken = token_gather_hidden(load_token(found_students[id]['python'])[0])
-                print(s)
-                dd = defaultdict(list)
-                for q in stoken['details']:
-                    for item in stoken['details'][q]['items']:
-                        # print(item, stoken['details'][q]['items'][item][0]['status'])
-                        dd['test'].append(item)
-                        dd['status'].append(stoken['details'][q]['items'][item][0]['status'])
-                print(tabulate.tabulate(dd, headers='keys'))
+                if not configuration.get("stage_report", {}).get("accept_only_py_no_token", False):
+                    print("=" * 50)
+                    s = f"{id}> only handed in the .py files and not the .token files. " + str(found_students[id]['python'] + " to skip this message, alter the stage_report['python_handin_checked'] field. ")
+                    messages['report'].append(s)
+                    stoken = token_gather_hidden(load_token(found_students[id]['python'])[0])
+                    print(s)
+                    dd = defaultdict(list)
+                    for q in stoken['details']:
+                        for item in stoken['details'][q]['items']:
+                            # print(item, stoken['details'][q]['items'][item][0]['status'])
+                            dd['test'].append(item)
+                            dd['status'].append(stoken['details'][q]['items'][item][0]['status'])
+                    print(tabulate.tabulate(dd, headers='keys'))
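
Context for the stage3/stage_report hunks above (illustrative note, not part of the patch): the patch threads several opt-out switches through the configuration dict, each read with conf.get(..., False) or configuration.get(...). Collected into one hypothetical configuration for reference; the keys are taken from the patch, the grouping and comments are illustrative:

    configuration = {
        "stage3": {
            "rename_items": {},                       # old item key -> current item key
            "accept_incompatible_token_names": [],    # ids whose token names may mismatch
            "accept_public_ok_hidden_failed": False,  # tolerate pass-public/fail-hidden splits
            "forgive_changed_grade_script": False,    # don't rerun tokens on grade-script changes
            "verified_problematic_items": {},         # {item: [ids]} already checked by hand
        },
        "stage_report": {
            "accept_student_code_better_than_token": False,  # silence .py-better-than-token messages
            "accept_only_py_no_token": False,                # silence .py-without-.token warnings
            "python_handin_checked": [],                     # ids already checked by hand
        },
    }
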
") - messages['report'].append(s) - stoken =token_gather_hidden(load_token(found_students[id]['python'])[0]) - print(s) - dd = defaultdict(list) - for q in stoken['details']: - for item in stoken['details'][q]['items']: - # print(item, stoken['details'][q]['items'][item][0]['status']) - dd['test'].append(item) - dd['status'].append(stoken['details'][q]['items'][item][0]['status']) - print(tabulate.tabulate(dd, headers='keys')) + if not configuration.get("stage_report", {}).get("accept_only_py_no_token", False): + print("=" * 50) + s = f"{id}> only handed in the .py files and not the .token files. " +str(found_students[id]['python'] + " to skip this mesage, alter the stage_report['python_handin_checked'] field. ") + messages['report'].append(s) + stoken =token_gather_hidden(load_token(found_students[id]['python'])[0]) + print(s) + dd = defaultdict(list) + for q in stoken['details']: + for item in stoken['details'][q]['items']: + # print(item, stoken['details'][q]['items'][item][0]['status']) + dd['test'].append(item) + dd['status'].append(stoken['details'][q]['items'][item][0]['status']) + print(tabulate.tabulate(dd, headers='keys')) @@ -682,17 +711,29 @@ def docker_stagewise_evaluation(base_directory, Dockerfile=None, instructor_grad return rs rs = _stage_report() + + all_msgs = [] if len(messages) > 0: print("=" * 50) print("Oy veh, there are messages") for stage in messages: print("Messages from", stage) for s in messages[stage]: - print(">> ", s) + print(m_ := ">> "+ s) + all_msgs.append(m_) print("-" * 50) - if accept_problems: + + if not accept_problems: assert False, "No messages allowed!" + + with open(base_directory +"/log.txt", "w") as f: + f.write("\n".join(all_msgs)) + + # rs['messages'] = messages + + # with open() + if plagiarism_check and True: from unitgrade_private.plagiarism.mossit import moss_it2023 moss_it2023(submissions_base_dir=stage4_dir, submissions_pattern="*-token", instructor_grade_script=instructor_grade_script) @@ -759,7 +800,7 @@ def docker_verify_projects(learn_zip_file_path, Dockerfile=None, instructor_grad Dockerfile = images['unitgrade-docker'] # info = class_information() # Dockerfile = paths['02450instructors'] + "/docker/Dockerfile" - tag = compile_docker_image(Dockerfile) + tag = compile_docker_image(Dockerfile, verbose=True) print("Docker verify project image tag:", tag) if not os.path.isdir(dzip + "/verified_tokens"): diff --git a/src/unitgrade_private/plagiarism/mossit.py b/src/unitgrade_private/plagiarism/mossit.py index c6d7ab078b3262cb4ccd5c46c9906922aa7fc266..5d0386e4555966182adf821bc3fe119a2e5825de 100644 --- a/src/unitgrade_private/plagiarism/mossit.py +++ b/src/unitgrade_private/plagiarism/mossit.py @@ -93,7 +93,8 @@ def moss_it2023(submissions_base_dir=None, submissions_pattern="*-token", whitel for q in cov_files: for i in cov_files[q].values(): for g in i: - shutil.copy(f"{tmpdirname}/{g}", f"{sdir}/{os.path.basename(g)}") + if os.path.isfile(student_file := f"{tmpdirname}/{g}"): + shutil.copy(student_file, f"{sdir}/{os.path.basename(g)}") # Now submit it to moss. import mosspy diff --git a/src/unitgrade_private/version.py b/src/unitgrade_private/version.py index 3d3c3e1a42015ff0c4aa61a233543461962de909..5c2098c4689bbe95e6e068ea2f6a6ac579a3416e 100644 --- a/src/unitgrade_private/version.py +++ b/src/unitgrade_private/version.py @@ -1,5 +1 @@ -<<<<<<< HEAD __version__ = "0.1.62" -======= -__version__ = "0.1.61" ->>>>>>> 05f66ebff680f2f18cf59f9adf46bc6c8ecea3c0