Coverage for C:\Users\tuhe\Documents\unitgrade\unitgrade2\unitgrade_helpers2.py: 69%

import numpy as np
from tabulate import tabulate
from datetime import datetime
import pyfiglet
from unitgrade2 import Hidden, myround, msum, mfloor, ActiveProgress
from unitgrade2 import __version__
import unittest
from unitgrade2.unitgrade2 import MySuite
from unitgrade2.unitgrade2 import UTextResult

import inspect
import os
import argparse
import sys
import time
import threading  # don't import Thread bc. of minify issue.
import tqdm  # don't do from tqdm import tqdm because of minify-issue
parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example:
To run all tests in a report:

> python assignment1_dp.py

To run only question 2 or question 2.1:

> python assignment1_dp.py -q 2
> python assignment1_dp.py -q 2.1

Note that this script does not grade your report. To grade your report, use:

> python report1_grade.py

Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
For instance, if the report file is in Documents/course_package/report1.py, and `course_package` is a python package, then change directory to 'Documents/' and run:

> python -m course_package.report1

See https://docs.python.org/3.9/using/cmdline.html
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)')
parser.add_argument('--showexpected', action="store_true", help='Show the expected/desired result')
parser.add_argument('--showcomputed', action="store_true", help='Show the answer your code computes')
parser.add_argument('--unmute', action="store_true", help='Show result of print(...) commands in code')
parser.add_argument('--passall', action="store_true", help='Automatically pass all tests. Useful when debugging.')
def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):
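    """Student-facing entry point for evaluating a report from the command line.

    `question` and `qitem` select a single question (and sub-item); when omitted they are taken
    from the -q command-line flag. `unmute` and `passall` likewise fall back to the corresponding
    command-line flags. `ignore_missing_file` skips the check for the pre-computed answer file,
    and `show_tol_err` is passed on to evaluate_report().
    """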
    args = parser.parse_args()
    if question is None and args.q is not None:
        question = args.q
        if "." in question:
            question, qitem = [int(v) for v in question.split(".")]
        else:
            question = int(question)
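    # Example: "-q 2.1" selects question 2, item 1 (question=2, qitem=1); "-q 2" selects all of question 2.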
    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
        raise Exception("> Error: The pre-computed answer file " + os.path.abspath(report.computed_answers_file) + " does not exist. Check your package installation.")
    if unmute is None:
        unmute = args.unmute
    if passall is None:
        passall = args.passall

    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False,
                                          passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,
                                          unmute=unmute, show_tol_err=show_tol_err)
    # try:  # For registering stats.
    #     import unitgrade_private
    #     import irlc.lectures
    #     import xlwings
    #     from openpyxl import Workbook
    #     import pandas as pd
    #     from collections import defaultdict
    #     dd = defaultdict(lambda: [])
    #     error_computed = []
    #     for k1, (q, _) in enumerate(report.questions):
    #         for k2, item in enumerate(q.items):
    #             dd['question_index'].append(k1)
    #             dd['item_index'].append(k2)
    #             dd['question'].append(q.name)
    #             dd['item'].append(item.name)
    #             dd['tol'].append(0 if not hasattr(item, 'tol') else item.tol)
    #             error_computed.append(0 if not hasattr(item, 'error_computed') else item.error_computed)
    #
    #     qstats = report.wdir + "/" + report.name + ".xlsx"
    #
    #     if os.path.isfile(qstats):
    #         d_read = pd.read_excel(qstats).to_dict()
    #     else:
    #         d_read = dict()
    #
    #     for k in range(1000):
    #         key = 'run_' + str(k)
    #         if key in d_read:
    #             dd[key] = list(d_read['run_0'].values())
    #         else:
    #             dd[key] = error_computed
    #             break
    #
    #     workbook = Workbook()
    #     worksheet = workbook.active
    #     for col, key in enumerate(dd.keys()):
    #         worksheet.cell(row=1, column=col + 1).value = key
    #         for row, item in enumerate(dd[key]):
    #             worksheet.cell(row=row + 2, column=col + 1).value = item
    #
    #     workbook.save(qstats)
    #     workbook.close()
    #
    # except ModuleNotFoundError as e:
    #     s = 234
    #     pass
    if question is None:
        print("Provisional evaluation")
        print(tabulate(table_data))
        print(" ")

    fr = inspect.getouterframes(inspect.currentframe())[1].filename
    gfile = os.path.basename(fr)[:-3] + "_grade.py"
    if os.path.exists(gfile):
        print("Note that your results have not yet been registered.\nTo register your results, please run the file:")
        print(">>>", gfile)
        print("In the same manner as you ran this file.")

    return results
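
# Minimal usage sketch (illustrative; `Report1` is a hypothetical Report subclass defined in the
# student's report script, not part of this module):
#
#     if __name__ == "__main__":
#         evaluate_report_student(Report1())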
def upack(q):
    # h = zip([(i['w'], i['possible'], i['obtained']) for i in q.values()])
    h = [(i['w'], i['possible'], i['obtained']) for i in q.values()]
    h = np.asarray(h)
    return h[:, 0], h[:, 1], h[:, 2]
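# Example: upack({0: {'w': 10, 'possible': 4, 'obtained': 3}, 1: {'w': 5, 'possible': 2, 'obtained': 2}})
# returns the three arrays (weights, possible, obtained): [10, 5], [4, 2], [3, 2].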
class UnitgradeTextRunner(unittest.TextTestRunner):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class SequentialTestLoader(unittest.TestLoader):
    def getTestCaseNames(self, testCaseClass):
        test_names = super().getTestCaseNames(testCaseClass)
        testcase_methods = list(testCaseClass.__dict__.keys())
        test_names.sort(key=testcase_methods.index)
        return test_names
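# Note: unittest.TestLoader returns test method names sorted alphabetically; SequentialTestLoader
# re-sorts them by their position in the class body, so tests run in the order they are written.
# Illustrative example (the class name is hypothetical):
#
#     class Week1Tests(unittest.TestCase):
#         def test_b(self): ...
#         def test_a(self): ...
#
#     SequentialTestLoader().getTestCaseNames(Week1Tests)   # -> ['test_b', 'test_a']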
def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,
                    unmute=False, show_help_flag=True, silent=False,
                    show_progress_bar=True,
                    show_tol_err=False):
    from unitgrade2.version import __version__
    now = datetime.now()
    ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
    b = "\n".join([l for l in ascii_banner.splitlines() if len(l.strip()) > 0])
    print(b + " v" + __version__)
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print("Started: " + dt_string)
    s = report.title
    if hasattr(report, "version") and report.version is not None:
        s += " version " + report.version
    print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")
    # print(f"Loaded answers from: ", report.computed_answers_file, "\n")
    table_data = []
    nL = 80
    t_start = time.time()
    score = {}
    # Use the sequential test loader so tests run in the order they are defined.
    loader = SequentialTestLoader()
    # loader = unittest.TestLoader()
    # loader.suiteClass = MySuite

    for n, (q, w) in enumerate(report.questions):
        # q = q()
        q_hidden = False
        # q_hidden = issubclass(q.__class__, Hidden)
        if question is not None and n + 1 != question:
            continue
        suite = loader.loadTestsFromTestCase(q)
        # print(suite)
        qtitle = q.__name__
        # qtitle = q.title if hasattr(q, "title") else q.id()
        # q.title = qtitle
        q_title_print = "Question %i: %s" % (n + 1, qtitle)
        print(q_title_print, end="")
        q.possible = 0
        q.obtained = 0
        q_ = {}  # Gather score in this class.
        from unitgrade2.unitgrade2 import UTextTestRunner
        # unittest.Te
        # q_with_outstanding_init = [item.question for item in q.items if not item.question.has_called_init_]
        UTextResult.q_title_print = q_title_print  # Hacky
        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
        # res = UTextTestRunner(verbosity=2, resultclass=unittest.TextTestResult).run(suite)
        # for j, item in enumerate(q.items):
        #     if qitem is not None and question is not None and j + 1 != qitem:
        #         continue
        #
        #     if q_with_outstanding_init is not None:  # check for None bc. this must be called to set titles.
        #         # if not item.question.has_called_init_:
        #         start = time.time()
        #
        #         cc = None
        #         if show_progress_bar:
        #             total_estimated_time = q.estimated_time  # Use this. The time is estimated for the q itself. # sum( [q2.estimated_time for q2 in q_with_outstanding_init] )
        #             cc = ActiveProgress(t=total_estimated_time, title=q_title_print)
        #         from unitgrade import Capturing  # DON'T REMOVE THIS LINE
        #         with eval('Capturing')(unmute=unmute):  # Clunky import syntax is required bc. of minify issue.
        #             try:
        #                 for q2 in q_with_outstanding_init:
        #                     q2.init()
        #                     q2.has_called_init_ = True
        #
        #                 # item.question.init()  # Initialize the question. Useful for sharing resources.
        #             except Exception as e:
        #                 if not passall:
        #                     if not silent:
        #                         print(" ")
        #                         print("=" * 30)
        #                         print(f"When initializing question {q.title} the initialization code threw an error")
        #                         print(e)
        #                         print("The remaining parts of this question will likely fail.")
        #                         print("=" * 30)
        #
        #         if show_progress_bar:
        #             cc.terminate()
        #             sys.stdout.flush()
        #             print(q_title_print, end="")
        #
        #         q_time = np.round(time.time() - start, 2)
        #
        #         print(" " * max(0, nL - len(q_title_print)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else ""))
        #         print("=" * nL)
        #         q_with_outstanding_init = None
        #
        #     # item.question = q  # Set the parent question instance for later reference.
        #     item_title_print = ss = "*** q%i.%i) %s" % (n + 1, j + 1, item.title)
        #
        #     if show_progress_bar:
        #         cc = ActiveProgress(t=item.estimated_time, title=item_title_print)
        #     else:
        #         print(item_title_print + ('.' * max(0, nL - 4 - len(ss))), end="")
        #     hidden = issubclass(item.__class__, Hidden)
        #     # if not hidden:
        #     #     print(ss, end="")
        #     #     sys.stdout.flush()
        #     start = time.time()
        #
        #     (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed, unmute=unmute, passall=passall, silent=silent)
        #     q_[j] = {'w': item.weight, 'possible': possible, 'obtained': current, 'hidden': hidden, 'computed': str(item._computed_answer), 'title': item.title}
        #     tsecs = np.round(time.time() - start, 2)
        #     if show_progress_bar:
        #         cc.terminate()
        #         sys.stdout.flush()
        #         print(item_title_print + ('.' * max(0, nL - 4 - len(ss))), end="")
        #
        #     if not hidden:
        #         ss = "PASS" if current == possible else "*** FAILED"
        #         if tsecs >= 0.1:
        #             ss += " (" + str(tsecs) + " seconds)"
        #         print(ss)
        # ws, possible, obtained = upack(q_)

        possible = res.testsRun
        obtained = len(res.successes)

        assert len(res.successes) + len(res.errors) + len(res.failures) == res.testsRun

        # possible = int(ws @ possible)
        # obtained = int(ws @ obtained)
        # obtained = int(myround(int((w * obtained) / possible))) if possible > 0 else 0

        obtained = int(w * obtained * 1.0 / possible) if possible > 0 else 0
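        # Example: with weight w=10 and 3 of 4 tests passing, obtained = int(10 * 3 / 4) = 7 points.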
        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': q_, 'title': qtitle}
        q.obtained = obtained
        q.possible = possible

        s1 = f"*** Question q{n+1}"
        s2 = f" {q.obtained}/{w}"
        print(s1 + ("." * (nL - len(s1) - len(s2))) + s2)
        print(" ")
        table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])
    ws, possible, obtained = upack(score)
    possible = int(msum(possible))
    obtained = int(msum(obtained))  # Cast to python int
    report.possible = possible
    report.obtained = obtained
    now = datetime.now()
    dt_string = now.strftime("%H:%M:%S")

    dt = int(time.time() - t_start)
    minutes = dt // 60
    seconds = dt - minutes * 60
    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")
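    # Example: plrl(1, "minute") -> "1 minute", plrl(2, "minute") -> "2 minutes".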
300 print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")
302 table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
303 results = {'total': (obtained, possible), 'details': score}
304 return results, table_data