Select Git revision
cs101report1_grade.py

tuhe authored
Code owners
Assign users and groups as approvers for specific file changes. Learn more.
cs101report1_grade.py 50.42 KiB
'''WARNING: Modifying, decompiling or otherwise tampering with this script, it's data or the resulting .token file will be investigated as a cheating attempt.'''
import numpy as np
# ---------------------------------------------------------------------------
# Obfuscation alias table.  This file has been machine-obfuscated: every
# builtin / stdlib attribute used below is first bound to a randomly named
# module-level alias.  The inline comments give the real name each alias
# stands for, so the rest of the file can be read without a lookup table.
# ---------------------------------------------------------------------------
iFlPxdyCqBrgMNDzVocS=str  # str
iFlPxdyCqBrgMNDzVocQ=None  # None
iFlPxdyCqBrgMNDzVocY=False  # False
iFlPxdyCqBrgMNDzVocT=int  # int
iFlPxdyCqBrgMNDzVock=Exception  # Exception
iFlPxdyCqBrgMNDzVoct=enumerate  # enumerate
iFlPxdyCqBrgMNDzVocs=hasattr  # hasattr
iFlPxdyCqBrgMNDzVocL=dict  # dict
iFlPxdyCqBrgMNDzVocA=range  # range
iFlPxdyCqBrgMNDzVocu=list  # list
iFlPxdyCqBrgMNDzVocR=print  # print
iFlPxdyCqBrgMNDzVocG=True  # True
iFlPxdyCqBrgMNDzVocO=len  # len
iFlPxdyCqBrgMNDzVocI=issubclass  # issubclass
iFlPxdyCqBrgMNDzVocW=eval  # eval (also used to reach exec() in the loader below)
iFlPxdyCqBrgMNDzVoce=max  # max
iFlPxdyCqBrgMNDzVocH=getattr  # getattr
iFlPxdyCqBrgMNDzVocX=open  # open
iFlPxdyCqBrgMNDzVojf=globals  # globals
iFlPxdyCqBrgMNDzVojE=bytes  # bytes
iFlPxdyCqBrgMNDzVocf=np.round  # np.round
iFlPxdyCqBrgMNDzVoEX=np.asarray  # np.asarray
from tabulate import tabulate
from datetime import datetime
iFlPxdyCqBrgMNDzVocE=datetime.now  # datetime.now
import pyfiglet
iFlPxdyCqBrgMNDzVocj=pyfiglet.figlet_format  # pyfiglet.figlet_format
import inspect
iFlPxdyCqBrgMNDzVocK=inspect.currentframe  # inspect.currentframe
iFlPxdyCqBrgMNDzVocb=inspect.getouterframes  # inspect.getouterframes
import os
iFlPxdyCqBrgMNDzVocn=os.getcwd  # os.getcwd
iFlPxdyCqBrgMNDzVocv=os.walk  # os.walk
iFlPxdyCqBrgMNDzVocw=os.path  # os.path
import argparse
iFlPxdyCqBrgMNDzVocp=argparse.RawTextHelpFormatter  # argparse.RawTextHelpFormatter
iFlPxdyCqBrgMNDzVoca=argparse.ArgumentParser  # argparse.ArgumentParser
import sys
iFlPxdyCqBrgMNDzVocJ=sys.stdout  # sys.stdout
import time
iFlPxdyCqBrgMNDzVocm=time.time  # time.time
import threading
import tqdm
# Command-line interface of the grading script.  The parser object (an
# argparse.ArgumentParser, created through the obfuscated alias) is consumed
# in the evaluation entry point below via .parse_args().
iFlPxdyCqBrgMNDzVofE=iFlPxdyCqBrgMNDzVoca(description='Evaluate your report.',epilog="""Example:
To run all tests in a report:
> python assignment1_dp.py
To run only question 2 or question 2.1
> python assignment1_dp.py -q 2
> python assignment1_dp.py -q 2.1
Note this scripts does not grade your report. To grade your report, use:
> python report1_grade.py
Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run:
> python -m course_package.report1
see https://docs.python.org/3.9/using/cmdline.html
""", formatter_class=iFlPxdyCqBrgMNDzVocp)
# -q restricts evaluation to one question ("2") or one sub-question ("2.1").
iFlPxdyCqBrgMNDzVofE.add_argument('-q',nargs='?',type=iFlPxdyCqBrgMNDzVocS,default=iFlPxdyCqBrgMNDzVocQ,help='Only evaluate this question (e.g.: -q 2)')
iFlPxdyCqBrgMNDzVofE.add_argument('--showexpected',action="store_true",help='Show the expected/desired result')
iFlPxdyCqBrgMNDzVofE.add_argument('--showcomputed',action="store_true",help='Show the answer your code computes')
iFlPxdyCqBrgMNDzVofE.add_argument('--unmute',action="store_true",help='Show result of print(...) commands in code')
iFlPxdyCqBrgMNDzVofE.add_argument('--passall',action="store_true",help='Automatically pass all tests. Useful when debugging.')
def iFlPxdyCqBrgMNDzVoER(iFlPxdyCqBrgMNDzVofu,question=iFlPxdyCqBrgMNDzVocQ,qitem=iFlPxdyCqBrgMNDzVocQ,unmute=iFlPxdyCqBrgMNDzVocQ,passall=iFlPxdyCqBrgMNDzVocQ,ignore_missing_file=iFlPxdyCqBrgMNDzVocY,show_tol_err=iFlPxdyCqBrgMNDzVocY):
    """Student-facing evaluation entry point (machine-obfuscated).

    The first argument is presumably the report object: the code reads its
    .computed_answers_file, .questions, .wdir and .name attributes -- TODO
    confirm against the (not visible) report class.  Keyword arguments that
    are left as None are filled from the parsed command-line flags; the
    actual grading is delegated to the evaluation function defined below,
    per-item errors are opportunistically logged to an .xlsx workbook, a
    provisional score table is printed, and the results dict is returned.
    """
    iFlPxdyCqBrgMNDzVofb=iFlPxdyCqBrgMNDzVofE.parse_args()  # argparse.Namespace from the module-level parser
    # CLI -q overrides the `question` argument; "2.1" selects question 2, item 1.
    if question is iFlPxdyCqBrgMNDzVocQ and iFlPxdyCqBrgMNDzVofb.q is not iFlPxdyCqBrgMNDzVocQ:
        question=iFlPxdyCqBrgMNDzVofb.q
        if "." in question:
            question,qitem=[iFlPxdyCqBrgMNDzVocT(v)for v in question.split(".")]
        else:
            question=iFlPxdyCqBrgMNDzVocT(question)
    # NOTE(review): Exception is constructed with several positional args, so
    # the message renders as a tuple rather than one sentence; likely a
    # leftover from a print(...) call.
    if not iFlPxdyCqBrgMNDzVocw.isfile(iFlPxdyCqBrgMNDzVofu.computed_answers_file)and not ignore_missing_file:
        raise iFlPxdyCqBrgMNDzVock("> Error: The pre-computed answer file",iFlPxdyCqBrgMNDzVocw.abspath(iFlPxdyCqBrgMNDzVofu.computed_answers_file),"does not exist. Check your package installation")
    if unmute is iFlPxdyCqBrgMNDzVocQ:
        unmute=iFlPxdyCqBrgMNDzVofb.unmute
    if passall is iFlPxdyCqBrgMNDzVocQ:
        passall=iFlPxdyCqBrgMNDzVofb.passall
    # Run the actual evaluation -> (results dict, printable score table).
    iFlPxdyCqBrgMNDzVofa,iFlPxdyCqBrgMNDzVofp=iFlPxdyCqBrgMNDzVoEO(iFlPxdyCqBrgMNDzVofu,question=question,show_progress_bar=not unmute,qitem=qitem,verbose=iFlPxdyCqBrgMNDzVocY,passall=passall,show_expected=iFlPxdyCqBrgMNDzVofb.showexpected,show_computed=iFlPxdyCqBrgMNDzVofb.showcomputed,unmute=unmute,show_tol_err=show_tol_err)
    # Best-effort spreadsheet logging of per-item errors; skipped silently
    # when any of the optional packages (irlc, xlwings, openpyxl, pandas)
    # is not installed.
    try:
        import irlc.lectures
        import xlwings
        from openpyxl import Workbook
        import pandas as pd
        from collections import defaultdict
        dd=defaultdict(lambda:[])
        iFlPxdyCqBrgMNDzVofJ=[]  # per-item error values computed in this run
        for k1,(q,_)in iFlPxdyCqBrgMNDzVoct(iFlPxdyCqBrgMNDzVofu.questions):
            for k2,(iFlPxdyCqBrgMNDzVofX,_)in iFlPxdyCqBrgMNDzVoct(q.items):
                dd['question_index'].append(k1)
                dd['item_index'].append(k2)
                dd['question'].append(q.name)
                dd['item'].append(iFlPxdyCqBrgMNDzVofX.name)
                dd['tol'].append(0 if not iFlPxdyCqBrgMNDzVocs(iFlPxdyCqBrgMNDzVofX,'tol')else iFlPxdyCqBrgMNDzVofX.tol)
                iFlPxdyCqBrgMNDzVofJ.append(0 if not iFlPxdyCqBrgMNDzVocs(iFlPxdyCqBrgMNDzVofX,'error_computed')else iFlPxdyCqBrgMNDzVofX.error_computed)
        iFlPxdyCqBrgMNDzVofm=iFlPxdyCqBrgMNDzVofu.wdir+"/"+iFlPxdyCqBrgMNDzVofu.name+".xlsx"  # log workbook path
        # Load previous columns from an existing workbook, if any.
        if iFlPxdyCqBrgMNDzVocw.isfile(iFlPxdyCqBrgMNDzVofm):
            iFlPxdyCqBrgMNDzVofU=pd.read_excel(iFlPxdyCqBrgMNDzVofm).to_dict()
        else:
            iFlPxdyCqBrgMNDzVofU=iFlPxdyCqBrgMNDzVocL()
        # Copy existing run_<k> columns and put the new errors in the first
        # unused slot (at most 1000 runs).
        # NOTE(review): every existing column is copied from 'run_0' rather
        # than 'run_<k>' -- looks like a bug in the original; left as-is here.
        for k in iFlPxdyCqBrgMNDzVocA(1000):
            iFlPxdyCqBrgMNDzVofh='run_'+iFlPxdyCqBrgMNDzVocS(k)
            if iFlPxdyCqBrgMNDzVofh in iFlPxdyCqBrgMNDzVofU:
                dd[iFlPxdyCqBrgMNDzVofh]=iFlPxdyCqBrgMNDzVocu(iFlPxdyCqBrgMNDzVofU['run_0'].values())
            else:
                dd[iFlPxdyCqBrgMNDzVofh]=iFlPxdyCqBrgMNDzVofJ
                break
        # Write the accumulated columns out with openpyxl (1-based cells).
        iFlPxdyCqBrgMNDzVofS=Workbook()
        iFlPxdyCqBrgMNDzVofQ=iFlPxdyCqBrgMNDzVofS.active
        for iFlPxdyCqBrgMNDzVofY,iFlPxdyCqBrgMNDzVofh in iFlPxdyCqBrgMNDzVoct(dd.keys()):
            iFlPxdyCqBrgMNDzVofQ.cell(row=1,column=iFlPxdyCqBrgMNDzVofY+1).value=iFlPxdyCqBrgMNDzVofh
            for iFlPxdyCqBrgMNDzVofT,iFlPxdyCqBrgMNDzVofX in iFlPxdyCqBrgMNDzVoct(dd[iFlPxdyCqBrgMNDzVofh]):
                iFlPxdyCqBrgMNDzVofQ.cell(row=iFlPxdyCqBrgMNDzVofT+2,column=iFlPxdyCqBrgMNDzVofY+1).value=iFlPxdyCqBrgMNDzVofX
        iFlPxdyCqBrgMNDzVofS.save(iFlPxdyCqBrgMNDzVofm)
        iFlPxdyCqBrgMNDzVofS.close()
    except ModuleNotFoundError as e:
        s=234  # no-op sentinel; missing optional dependencies are deliberately ignored
        pass
    if question is iFlPxdyCqBrgMNDzVocQ:
        iFlPxdyCqBrgMNDzVocR("Provisional evaluation")
        tabulate(iFlPxdyCqBrgMNDzVofp)  # NOTE(review): return value discarded; the next line re-tabulates
        iFlPxdyCqBrgMNDzVofk=iFlPxdyCqBrgMNDzVofp
        iFlPxdyCqBrgMNDzVocR(tabulate(iFlPxdyCqBrgMNDzVofk))
        iFlPxdyCqBrgMNDzVocR(" ")
    # Derive the companion "<report>_grade.py" filename from the *caller's*
    # file (inspect.getouterframes) and point the student at it if present.
    fr=iFlPxdyCqBrgMNDzVocb(iFlPxdyCqBrgMNDzVocK())[1].filename
    iFlPxdyCqBrgMNDzVoft=iFlPxdyCqBrgMNDzVocw.basename(fr)[:-3]+"_grade.py"
    if iFlPxdyCqBrgMNDzVocw.exists(iFlPxdyCqBrgMNDzVoft):
        iFlPxdyCqBrgMNDzVocR("Note your results have not yet been registered. \nTo register your results, please run the file:")
        iFlPxdyCqBrgMNDzVocR(">>>",iFlPxdyCqBrgMNDzVoft)
        iFlPxdyCqBrgMNDzVocR("In the same manner as you ran this file.")
    return iFlPxdyCqBrgMNDzVofa
def iFlPxdyCqBrgMNDzVoEG(q):
    """Unpack weight/possible/obtained columns from a per-item result dict.

    Parameters
    ----------
    q : dict
        Maps an index to a dict containing (at least) the keys 'w',
        'possible' and 'obtained', as built by the evaluation loop.

    Returns
    -------
    tuple of numpy.ndarray
        (weights, possible, obtained), one entry per item, in the dict's
        iteration order.

    Note: the original reached numpy through an obfuscated module-global
    alias of np.asarray; calling np.asarray directly is equivalent and
    removes the dependency on the alias table.
    """
    table = np.asarray([(item['w'], item['possible'], item['obtained']) for item in q.values()])
    return table[:, 0], table[:, 1], table[:, 2],
def iFlPxdyCqBrgMNDzVoEO(iFlPxdyCqBrgMNDzVofu,question=iFlPxdyCqBrgMNDzVocQ,qitem=iFlPxdyCqBrgMNDzVocQ,passall=iFlPxdyCqBrgMNDzVocY,verbose=iFlPxdyCqBrgMNDzVocY,show_expected=iFlPxdyCqBrgMNDzVocY,show_computed=iFlPxdyCqBrgMNDzVocY,unmute=iFlPxdyCqBrgMNDzVocY,show_help_flag=iFlPxdyCqBrgMNDzVocG,silent=iFlPxdyCqBrgMNDzVocY,show_progress_bar=iFlPxdyCqBrgMNDzVocG,show_tol_err=iFlPxdyCqBrgMNDzVocY):
    """Core evaluation loop (machine-obfuscated).

    Iterates over the report's weighted questions and their weighted items,
    scores each item via item.get_points(...), aggregates weighted scores per
    question and for the whole report, and prints progress/banner output.

    Returns (results, table_data) where results is
    {'total': (obtained, possible), 'details': {question_index: {...}}}
    and table_data is a list of rows for tabulate().

    NOTE(review): Hidden, ActiveProgress, Capturing, myround, msum and
    __version__ are not defined in this chunk; they presumably come from the
    source blob exec'ed by the bootstrap loader below -- TODO confirm.
    """
    iFlPxdyCqBrgMNDzVofs=iFlPxdyCqBrgMNDzVocE()  # datetime.now()
    iFlPxdyCqBrgMNDzVofL=iFlPxdyCqBrgMNDzVocj("UnitGrade",font="doom")  # ASCII-art banner
    # Drop blank banner lines before printing.
    b="\n".join([l for l in iFlPxdyCqBrgMNDzVofL.splitlines()if iFlPxdyCqBrgMNDzVocO(l.strip())>0])
    iFlPxdyCqBrgMNDzVocR(b+" v"+__version__)
    iFlPxdyCqBrgMNDzVofA=iFlPxdyCqBrgMNDzVofs.strftime("%d/%m/%Y %H:%M:%S")
    iFlPxdyCqBrgMNDzVocR("Started: "+iFlPxdyCqBrgMNDzVofA)
    s=iFlPxdyCqBrgMNDzVofu.title
    if iFlPxdyCqBrgMNDzVofu.version is not iFlPxdyCqBrgMNDzVocQ:
        s+=" version "+iFlPxdyCqBrgMNDzVofu.version
    iFlPxdyCqBrgMNDzVocR("Evaluating "+s,"(use --help for options)" if show_help_flag else "")
    iFlPxdyCqBrgMNDzVocR(f"Loaded answers from: ",iFlPxdyCqBrgMNDzVofu.computed_answers_file,"\n")
    iFlPxdyCqBrgMNDzVofp=[]  # rows for the final tabulate() table
    nL=80  # console line width used for dot-padding
    iFlPxdyCqBrgMNDzVofR=iFlPxdyCqBrgMNDzVocm()  # time.time() at start, for the elapsed-time report
    iFlPxdyCqBrgMNDzVofG={}  # per-question score details, keyed by question index
    for n,(q,w)in iFlPxdyCqBrgMNDzVoct(iFlPxdyCqBrgMNDzVofu.questions):
        iFlPxdyCqBrgMNDzVofO=iFlPxdyCqBrgMNDzVocI(q.__class__,Hidden)  # question hidden from output?
        if question is not iFlPxdyCqBrgMNDzVocQ and n+1!=question:
            continue  # -q filter: skip all but the selected question
        iFlPxdyCqBrgMNDzVofI="Question %i: %s"%(n+1,q.title)
        iFlPxdyCqBrgMNDzVocR(iFlPxdyCqBrgMNDzVofI,end="")
        q.possible=0
        q.obtained=0
        q_={}  # per-item results for this question
        for j,(iFlPxdyCqBrgMNDzVofX,iw)in iFlPxdyCqBrgMNDzVoct(q.items):
            if qitem is not iFlPxdyCqBrgMNDzVocQ and question is not iFlPxdyCqBrgMNDzVocQ and iFlPxdyCqBrgMNDzVofX is not iFlPxdyCqBrgMNDzVocQ and j+1!=qitem:
                continue  # -q X.Y filter: skip all but the selected item
            # Lazily run the question's init() exactly once, with output
            # captured (muted unless --unmute) and timed.
            if not q.has_called_init_:
                iFlPxdyCqBrgMNDzVofe=iFlPxdyCqBrgMNDzVocm()  # init start time
                cc=iFlPxdyCqBrgMNDzVocQ
                if show_progress_bar:
                    cc=ActiveProgress(t=q.estimated_time,title=iFlPxdyCqBrgMNDzVofI)
                with iFlPxdyCqBrgMNDzVocW('Capturing')(unmute=unmute):
                    try:
                        q.init()
                    except iFlPxdyCqBrgMNDzVock as e:
                        # Init failure is reported but not fatal; the items
                        # are still attempted (and will most likely fail).
                        if not passall:
                            if not silent:
                                iFlPxdyCqBrgMNDzVocR(" ")
                                iFlPxdyCqBrgMNDzVocR("="*30)
                                iFlPxdyCqBrgMNDzVocR(f"When initializing question {q.title} the initialization code threw an error")
                                iFlPxdyCqBrgMNDzVocR(e)
                                iFlPxdyCqBrgMNDzVocR("The remaining parts of this question will likely fail.")
                                iFlPxdyCqBrgMNDzVocR("="*30)
                if show_progress_bar:
                    cc.terminate()
                    iFlPxdyCqBrgMNDzVocJ.flush()  # sys.stdout.flush()
                    iFlPxdyCqBrgMNDzVocR(iFlPxdyCqBrgMNDzVofI,end="")
                q.has_called_init_=iFlPxdyCqBrgMNDzVocG
                iFlPxdyCqBrgMNDzVofH=iFlPxdyCqBrgMNDzVocf(iFlPxdyCqBrgMNDzVocm()-iFlPxdyCqBrgMNDzVofe,2)  # init duration, 2 decimals
                iFlPxdyCqBrgMNDzVocR(" "*iFlPxdyCqBrgMNDzVoce(0,nL-iFlPxdyCqBrgMNDzVocO(iFlPxdyCqBrgMNDzVofI))+(" ("+iFlPxdyCqBrgMNDzVocS(iFlPxdyCqBrgMNDzVofH)+" seconds)" if iFlPxdyCqBrgMNDzVofH>=0.1 else ""))
                iFlPxdyCqBrgMNDzVocR("="*nL)
            iFlPxdyCqBrgMNDzVofX.question=q  # give the item a back-reference to its question
            iFlPxdyCqBrgMNDzVoEf=ss="*** q%i.%i) %s"%(n+1,j+1,iFlPxdyCqBrgMNDzVofX.title)
            if show_progress_bar:
                cc=ActiveProgress(t=iFlPxdyCqBrgMNDzVofX.estimated_time,title=iFlPxdyCqBrgMNDzVoEf)
            else:
                iFlPxdyCqBrgMNDzVocR(iFlPxdyCqBrgMNDzVoEf+('.'*iFlPxdyCqBrgMNDzVoce(0,nL-4-iFlPxdyCqBrgMNDzVocO(ss))),end="")
            iFlPxdyCqBrgMNDzVoEc=iFlPxdyCqBrgMNDzVocI(iFlPxdyCqBrgMNDzVofX.__class__,Hidden)  # item hidden?
            iFlPxdyCqBrgMNDzVofe=iFlPxdyCqBrgMNDzVocm()  # item start time
            # Score the item: (obtained, possible).
            (iFlPxdyCqBrgMNDzVoEj,iFlPxdyCqBrgMNDzVoEb)=iFlPxdyCqBrgMNDzVofX.get_points(show_expected=show_expected,show_computed=show_computed,unmute=unmute,passall=passall,silent=silent)
            q_[j]={'w':iw,'possible':iFlPxdyCqBrgMNDzVoEb,'obtained':iFlPxdyCqBrgMNDzVoEj,'hidden':iFlPxdyCqBrgMNDzVoEc,'computed':iFlPxdyCqBrgMNDzVocS(iFlPxdyCqBrgMNDzVofX._computed_answer),'title':iFlPxdyCqBrgMNDzVofX.title}
            iFlPxdyCqBrgMNDzVoEK=iFlPxdyCqBrgMNDzVocf(iFlPxdyCqBrgMNDzVocm()-iFlPxdyCqBrgMNDzVofe,2)  # item duration
            if show_progress_bar:
                cc.terminate()
                iFlPxdyCqBrgMNDzVocJ.flush()
                iFlPxdyCqBrgMNDzVocR(iFlPxdyCqBrgMNDzVoEf+('.'*iFlPxdyCqBrgMNDzVoce(0,nL-4-iFlPxdyCqBrgMNDzVocO(ss))),end="")
            if not iFlPxdyCqBrgMNDzVoEc:
                ss="PASS" if iFlPxdyCqBrgMNDzVoEj==iFlPxdyCqBrgMNDzVoEb else "*** FAILED"
                if iFlPxdyCqBrgMNDzVoEK>=0.1:
                    ss+=" ("+iFlPxdyCqBrgMNDzVocS(iFlPxdyCqBrgMNDzVoEK)+" seconds)"
                iFlPxdyCqBrgMNDzVocR(ss)
        # Aggregate the question score: weighted possible/obtained, then
        # rescale to the question's weight w (rounded via myround).
        ws,iFlPxdyCqBrgMNDzVoEb,iFlPxdyCqBrgMNDzVoEw=iFlPxdyCqBrgMNDzVoEG(q_)
        iFlPxdyCqBrgMNDzVoEb=iFlPxdyCqBrgMNDzVocT(ws@iFlPxdyCqBrgMNDzVoEb)
        iFlPxdyCqBrgMNDzVoEw=iFlPxdyCqBrgMNDzVocT(ws@iFlPxdyCqBrgMNDzVoEw)
        iFlPxdyCqBrgMNDzVoEw=iFlPxdyCqBrgMNDzVocT(myround(iFlPxdyCqBrgMNDzVocT((w*iFlPxdyCqBrgMNDzVoEw)/iFlPxdyCqBrgMNDzVoEb)))if iFlPxdyCqBrgMNDzVoEb>0 else 0
        iFlPxdyCqBrgMNDzVofG[n]={'w':w,'possible':w,'obtained':iFlPxdyCqBrgMNDzVoEw,'items':q_,'hidden':iFlPxdyCqBrgMNDzVofO,'title':q.title}
        q.obtained=iFlPxdyCqBrgMNDzVoEw
        q.possible=iFlPxdyCqBrgMNDzVoEb
        s1=f"*** Question q{n+1}"
        s2=f" {q.obtained}/{w}"
        iFlPxdyCqBrgMNDzVocR(s1+("."*(nL-iFlPxdyCqBrgMNDzVocO(s1)-iFlPxdyCqBrgMNDzVocO(s2)))+s2)
        iFlPxdyCqBrgMNDzVocR(" ")
        iFlPxdyCqBrgMNDzVofp.append([f"Question q{n+1}",f"{q.obtained}/{w}"])
    # Report-level totals across all questions.
    ws,iFlPxdyCqBrgMNDzVoEb,iFlPxdyCqBrgMNDzVoEw=iFlPxdyCqBrgMNDzVoEG(iFlPxdyCqBrgMNDzVofG)
    iFlPxdyCqBrgMNDzVoEb=iFlPxdyCqBrgMNDzVocT(msum(iFlPxdyCqBrgMNDzVoEb))
    iFlPxdyCqBrgMNDzVoEw=iFlPxdyCqBrgMNDzVocT(msum(iFlPxdyCqBrgMNDzVoEw))
    iFlPxdyCqBrgMNDzVofu.possible=iFlPxdyCqBrgMNDzVoEb
    iFlPxdyCqBrgMNDzVofu.obtained=iFlPxdyCqBrgMNDzVoEw
    iFlPxdyCqBrgMNDzVofs=iFlPxdyCqBrgMNDzVocE()  # datetime.now() at completion
    iFlPxdyCqBrgMNDzVofA=iFlPxdyCqBrgMNDzVofs.strftime("%H:%M:%S")
    dt=iFlPxdyCqBrgMNDzVocT(iFlPxdyCqBrgMNDzVocm()-iFlPxdyCqBrgMNDzVofR)  # elapsed seconds
    iFlPxdyCqBrgMNDzVoEv=dt//60
    iFlPxdyCqBrgMNDzVoEn=dt-iFlPxdyCqBrgMNDzVoEv*60
    # Pluralize "minute"/"second" as needed.
    iFlPxdyCqBrgMNDzVoEa=lambda i,s:iFlPxdyCqBrgMNDzVocS(i)+" "+s+("s" if i!=1 else "")
    iFlPxdyCqBrgMNDzVocR(f"Completed: "+iFlPxdyCqBrgMNDzVofA+" ("+iFlPxdyCqBrgMNDzVoEa(iFlPxdyCqBrgMNDzVoEv,"minute")+", "+iFlPxdyCqBrgMNDzVoEa(iFlPxdyCqBrgMNDzVoEn,"second")+")")
    iFlPxdyCqBrgMNDzVofp.append(["Total",""+iFlPxdyCqBrgMNDzVocS(iFlPxdyCqBrgMNDzVofu.obtained)+"/"+iFlPxdyCqBrgMNDzVocS(iFlPxdyCqBrgMNDzVofu.possible)])
    iFlPxdyCqBrgMNDzVofa={'total':(iFlPxdyCqBrgMNDzVoEw,iFlPxdyCqBrgMNDzVoEb),'details':iFlPxdyCqBrgMNDzVofG}
    return iFlPxdyCqBrgMNDzVofa,iFlPxdyCqBrgMNDzVofp
# Redundant second import/alias block emitted by the obfuscator.  Several
# modules are re-imported and the same aliases rebound to identical targets
# (harmless); only json, bz2 and pickle are new here.
from tabulate import tabulate
from datetime import datetime
iFlPxdyCqBrgMNDzVocE=datetime.now  # datetime.now (rebound, same value)
import inspect
iFlPxdyCqBrgMNDzVocK=inspect.currentframe  # inspect.currentframe
iFlPxdyCqBrgMNDzVocb=inspect.getouterframes  # inspect.getouterframes
import json
iFlPxdyCqBrgMNDzVocU=json.dumps  # json.dumps
import os
iFlPxdyCqBrgMNDzVocn=os.getcwd  # os.getcwd
iFlPxdyCqBrgMNDzVocv=os.walk  # os.walk
iFlPxdyCqBrgMNDzVocw=os.path  # os.path
import bz2
import pickle
iFlPxdyCqBrgMNDzVoch=pickle.loads  # pickle.loads
iFlPxdyCqBrgMNDzVocw=os.path  # duplicate rebinding
import os
iFlPxdyCqBrgMNDzVocn=os.getcwd
iFlPxdyCqBrgMNDzVocv=os.walk
def iFlPxdyCqBrgMNDzVoEI(iFlPxdyCqBrgMNDzVoEQ, iFlPxdyCqBrgMNDzVoEs):
    """Write the token payload (a str, first arg) to a bz2-compressed text
    file at the path given by the second arg.

    The original reached bz2.open through getattr(bz2, 'open') purely as an
    obfuscation layer; the direct call is equivalent and readable.
    """
    with bz2.open(iFlPxdyCqBrgMNDzVoEs, "wt") as f:
        f.write(iFlPxdyCqBrgMNDzVoEQ)
def iFlPxdyCqBrgMNDzVoEW(imp):
    """Collect the source code of a module (or a whole package) so it can be
    embedded in the handin token.

    Parameters
    ----------
    imp : module
        An already-imported module object.

    Returns
    -------
    dict
        Maps path (relative to the directory *containing* the top-level
        package of `imp`) -> file contents, for every .py file found.

    The original routed os.walk / os.path / open through obfuscated
    module-global aliases and kept an unused local re-binding of `imp`;
    both removed, behavior unchanged.
    """
    sources = {}
    f = imp.__file__
    # Root directory: the parent of the directory holding imp's top-level package.
    root = os.path.dirname(__import__(imp.__name__.split('.')[0]).__file__)
    root = os.path.dirname(root)
    if f.endswith("__init__.py"):
        # Package: walk the package directory and grab every .py file.
        for dirpath, dirs, files in os.walk(os.path.dirname(f)):
            for fname in files:
                if fname.endswith(".py"):
                    rel = os.path.relpath(os.path.join(dirpath, fname), root)
                    with open(os.path.join(dirpath, fname), 'r') as ff:
                        sources[rel] = ff.read()
    else:
        # Single-file module: just that one file.
        rel = os.path.relpath(f, root)
        with open(f, 'r') as ff:
            sources[rel] = ff.read()
    return sources
def iFlPxdyCqBrgMNDzVoEe(iFlPxdyCqBrgMNDzVofu,output_dir=iFlPxdyCqBrgMNDzVocQ):
    """Grade the report silently, bundle results + source files into a JSON
    payload, and write it out as a bz2-compressed ``.token`` file for upload.

    First argument is presumably the report object (reads .individual_imports,
    .pack_imports, .version, 'total' from the results -- TODO confirm);
    output_dir defaults to the current working directory.
    """
    n=80  # console line width
    # Full evaluation, silent (no per-item chatter).
    iFlPxdyCqBrgMNDzVofa,iFlPxdyCqBrgMNDzVofp=iFlPxdyCqBrgMNDzVoEO(iFlPxdyCqBrgMNDzVofu,show_help_flag=iFlPxdyCqBrgMNDzVocY,show_expected=iFlPxdyCqBrgMNDzVocY,show_computed=iFlPxdyCqBrgMNDzVocY,silent=iFlPxdyCqBrgMNDzVocG)
    iFlPxdyCqBrgMNDzVocR(" ")
    iFlPxdyCqBrgMNDzVocR("="*n)
    iFlPxdyCqBrgMNDzVocR("Final evaluation")
    iFlPxdyCqBrgMNDzVocR(tabulate(iFlPxdyCqBrgMNDzVofp))
    # Modules the student declares as individually authored are listed and
    # then folded into the regular pack_imports for source collection.
    if iFlPxdyCqBrgMNDzVocO(iFlPxdyCqBrgMNDzVofu.individual_imports)>0:
        iFlPxdyCqBrgMNDzVocR("By uploading the .token file, you verify the files:")
        for m in iFlPxdyCqBrgMNDzVofu.individual_imports:
            iFlPxdyCqBrgMNDzVocR(">",m.__file__)
        iFlPxdyCqBrgMNDzVocR("Are created/modified individually by you in agreement with DTUs exam rules")
        iFlPxdyCqBrgMNDzVofu.pack_imports+=iFlPxdyCqBrgMNDzVofu.individual_imports
    iFlPxdyCqBrgMNDzVoEh={}  # accumulated {relative path: source text}
    if iFlPxdyCqBrgMNDzVocO(iFlPxdyCqBrgMNDzVofu.pack_imports)>0:
        iFlPxdyCqBrgMNDzVocR("Including files in upload...")
        for m in iFlPxdyCqBrgMNDzVofu.pack_imports:
            # Snip the module/package's sources; announce only if it adds new files.
            iFlPxdyCqBrgMNDzVoES =iFlPxdyCqBrgMNDzVoEW(m)
            if iFlPxdyCqBrgMNDzVocO([k for k in iFlPxdyCqBrgMNDzVoES if k not in iFlPxdyCqBrgMNDzVoEh])>0:
                iFlPxdyCqBrgMNDzVocR(f"*** {m.__name__}")
            iFlPxdyCqBrgMNDzVoEh={**iFlPxdyCqBrgMNDzVoEh,**iFlPxdyCqBrgMNDzVoES}
    iFlPxdyCqBrgMNDzVofa['sources']=iFlPxdyCqBrgMNDzVoEh
    iFlPxdyCqBrgMNDzVoEQ=iFlPxdyCqBrgMNDzVocU(iFlPxdyCqBrgMNDzVofa,indent=4)  # json.dumps payload
    if output_dir is iFlPxdyCqBrgMNDzVocQ:
        output_dir=iFlPxdyCqBrgMNDzVocn()  # os.getcwd()
    # Token filename encodes class name, score and (optionally) version:
    # <Class>_handin_<obtained>_of_<possible>[_v<version>].token
    iFlPxdyCqBrgMNDzVoET=iFlPxdyCqBrgMNDzVofu.__class__.__name__+"_handin"
    iFlPxdyCqBrgMNDzVoEk,iFlPxdyCqBrgMNDzVoEb=iFlPxdyCqBrgMNDzVofa['total']
    iFlPxdyCqBrgMNDzVoEt="_v"+iFlPxdyCqBrgMNDzVofu.version if iFlPxdyCqBrgMNDzVofu.version is not iFlPxdyCqBrgMNDzVocQ else ""
    iFlPxdyCqBrgMNDzVoEs="%s_%i_of_%i%s.token"%(iFlPxdyCqBrgMNDzVoET,iFlPxdyCqBrgMNDzVoEk,iFlPxdyCqBrgMNDzVoEb,iFlPxdyCqBrgMNDzVoEt)
    iFlPxdyCqBrgMNDzVoEs=iFlPxdyCqBrgMNDzVocw.join(output_dir,iFlPxdyCqBrgMNDzVoEs)
    iFlPxdyCqBrgMNDzVoEI(iFlPxdyCqBrgMNDzVoEQ,iFlPxdyCqBrgMNDzVoEs)  # bz2-compress and write the token
    iFlPxdyCqBrgMNDzVocR(" ")
    iFlPxdyCqBrgMNDzVocR("To get credit for your results, please upload the single file: ")
    iFlPxdyCqBrgMNDzVocR(">",iFlPxdyCqBrgMNDzVoEs)
    iFlPxdyCqBrgMNDzVocR("To campusnet without any modifications.")
def iFlPxdyCqBrgMNDzVoEH(iFlPxdyCqBrgMNDzVoEu,iFlPxdyCqBrgMNDzVoEL,payload):
iFlPxdyCqBrgMNDzVocW("exec")(iFlPxdyCqBrgMNDzVoEL,iFlPxdyCqBrgMNDzVojf())
pl=iFlPxdyCqBrgMNDzVoch(iFlPxdyCqBrgMNDzVojE.fromhex(payload))
iFlPxdyCqBrgMNDzVofu=iFlPxdyCqBrgMNDzVocW(iFlPxdyCqBrgMNDzVoEu)(payload=pl,strict=iFlPxdyCqBrgMNDzVocG)
return iFlPxdyCqBrgMNDzVofu
iFlPxdyCqBrgMNDzVoEL='import os\n\n# DONT\'t import stuff here since install script requires __version__\n\ndef cache_write(object, file_name, verbose=True):\n import compress_pickle\n dn = os.path.dirname(file_name)\n if not os.path.exists(dn):\n os.mkdir(dn)\n if verbose: print("Writing cache...", file_name)\n with open(file_name, \'wb\', ) as f:\n compress_pickle.dump(object, f, compression="lzma")\n if verbose: print("Done!")\n\n\ndef cache_exists(file_name):\n # file_name = cn_(file_name) if cache_prefix else file_name\n return os.path.exists(file_name)\n\n\ndef cache_read(file_name):\n import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n # file_name = cn_(file_name) if cache_prefix else file_name\n if os.path.exists(file_name):\n try:\n with open(file_name, \'rb\') as f:\n return compress_pickle.load(f, compression="lzma")\n except Exception as e:\n print("Tried to load a bad pickle file at", file_name)\n print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n print(e)\n # return pickle.load(f)\n else:\n return None\n\n\n\n"""\ngit add . 
&& git commit -m "Options" && git push && pip install git+ssh://git@gitlab.compute.dtu.dk/tuhe/unitgrade.git --upgrade\n\n"""\nimport unittest\nimport numpy as np\nimport os\nimport sys\nfrom io import StringIO\nimport collections\nimport inspect\nimport re\nimport threading\nimport tqdm\nimport time\n\nmyround = lambda x: np.round(x) # required.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\ndef setup_dir_by_class(C,base_dir):\n name = C.__class__.__name__\n # base_dir = os.path.join(base_dir, name)\n # if not os.path.isdir(base_dir):\n # os.makedirs(base_dir)\n return base_dir, name\n\nclass Hidden:\n def hide(self):\n return True\n\nclass Logger(object):\n def __init__(self, buffer):\n self.terminal = sys.stdout\n self.log = buffer\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n pass\n\nclass Capturing(list):\n def __init__(self, *args, unmute=False, **kwargs):\n self.unmute = unmute\n super().__init__(*args, **kwargs)\n\n def __enter__(self, capture_errors=True): # don\'t put arguments here.\n self._stdout = sys.stdout\n self._stringio = StringIO()\n if self.unmute:\n sys.stdout = Logger(self._stringio)\n else:\n sys.stdout = self._stringio\n\n if capture_errors:\n self._sterr = sys.stderr\n sys.sterr = StringIO() # memory hole it\n self.capture_errors = capture_errors\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n if self.capture_errors:\n sys.sterr = self._sterr\n\n\nclass QItem(unittest.TestCase):\n title = None\n testfun = None\n tol = 0\n estimated_time = 0.42\n _precomputed_payload = None\n _computed_answer = None # Internal helper to later get results.\n # _precomputed_title = None\n\n def __init__(self, working_directory=None, correct_answer_payload=None, question=None, *args, **kwargs):\n if 
self.tol > 0 and self.testfun is None:\n self.testfun = self.assertL2Relative\n elif self.testfun is None:\n self.testfun = self.assertEqual\n\n self.name = self.__class__.__name__\n self._correct_answer_payload = correct_answer_payload\n self.question = question\n # self.a = "not set"\n\n super().__init__(*args, **kwargs)\n if self.title is None:\n self.title = self.name\n\n def _safe_get_title(self):\n if self._precomputed_title is not None:\n return self._precomputed_title\n return self.title\n\n # def get_title(self):\n # Overwrite this to compute a post-computed title.\n # return None\n\n def assertNorm(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed).flat- np.asarray(expected)).flat )\n nrm = np.sqrt(np.sum( diff ** 2))\n\n self.error_computed = nrm\n\n if nrm > tol:\n print(f"Not equal within tolerance {tol}; norm of difference was {nrm}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n self.error_computed = np.max(diff)\n\n if np.max(diff) > tol:\n print("Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def assertL2Relative(self, computed, expected, tol=None):\n if tol == None:\n tol = self.tol\n diff = np.abs( (np.asarray(computed) - np.asarray(expected)) )\n diff = diff / (1e-8 + np.abs( (np.asarray(computed) + np.asarray(expected)) ) )\n self.error_computed = np.max(np.abs(diff))\n if np.sum(diff > tol) > 0:\n print(f"Not equal within tolerance {tol}")\n print(f"Element-wise differences {diff.tolist()}")\n self.assertEqual(computed, expected, msg=f"Not equal within tolerance {tol}")\n\n def precomputed_payload(self):\n return 
self._precomputed_payload\n\n def precompute_payload(self):\n # Pre-compute resources to include in tests (useful for getting around rng).\n pass\n\n def compute_answer(self, unmute=False):\n raise NotImplementedError("test code here")\n\n def test(self, computed, expected):\n self.testfun(computed, expected)\n\n def get_points(self, verbose=False, show_expected=False, show_computed=False,unmute=False, passall=False, silent=False, **kwargs):\n possible = 1\n computed = None\n def show_computed_(computed):\n print(">>> Your output:")\n print(computed)\n\n def show_expected_(expected):\n print(">>> Expected output (note: may have been processed; read text script):")\n print(expected)\n\n correct = self._correct_answer_payload\n try:\n if unmute: # Required to not mix together print stuff.\n print("")\n computed = self.compute_answer(unmute=unmute)\n except Exception as e:\n if not passall:\n if not silent:\n print("\\n=================================================================================")\n print(f"When trying to run test class \'{self.name}\' your code threw an error:", e)\n show_expected_(correct)\n import traceback\n print(traceback.format_exc())\n print("=================================================================================")\n return (0, possible)\n\n if self._computed_answer is None:\n self._computed_answer = computed\n\n if show_expected or show_computed:\n print("\\n")\n if show_expected:\n show_expected_(correct)\n if show_computed:\n show_computed_(computed)\n try:\n if not passall:\n self.test(computed=computed, expected=correct)\n except Exception as e:\n if not silent:\n print("\\n=================================================================================")\n print(f"Test output from test class \'{self.name}\' does not match expected result. 
Test error:")\n print(e)\n show_computed_(computed)\n show_expected_(correct)\n return (0, possible)\n return (1, possible)\n\n def score(self):\n try:\n self.test()\n except Exception as e:\n return 0\n return 1\n\nclass QPrintItem(QItem):\n def compute_answer_print(self):\n """\n Generate output which is to be tested. By default, both text written to the terminal using print(...) as well as return values\n are send to process_output (see compute_answer below). In other words, the text generated is:\n\n res = compute_Answer_print()\n txt = (any terminal output generated above)\n numbers = (any numbers found in terminal-output txt)\n\n self.test(process_output(res, txt, numbers), <expected result>)\n\n :return: Optional values for comparison\n """\n raise Exception("Generate output here. The output is passed to self.process_output")\n\n def process_output(self, res, txt, numbers):\n return res\n\n def compute_answer(self, unmute=False):\n with Capturing(unmute=unmute) as output:\n res = self.compute_answer_print()\n s = "\\n".join(output)\n s = rm_progress_bar(s) # Remove progress bar.\n numbers = extract_numbers(s)\n self._computed_answer = (res, s, numbers)\n return self.process_output(res, s, numbers)\n\nclass OrderedClassMembers(type):\n @classmethod\n def __prepare__(self, name, bases):\n return collections.OrderedDict()\n def __new__(self, name, bases, classdict):\n ks = list(classdict.keys())\n for b in bases:\n ks += b.__ordered__\n classdict[\'__ordered__\'] = [key for key in ks if key not in (\'__module__\', \'__qualname__\')]\n return type.__new__(self, name, bases, classdict)\n\nclass QuestionGroup(metaclass=OrderedClassMembers):\n title = "Graph search"\n items = None\n partially_scored = False\n t_init = 0 # Time spend on initialization (placeholder; set this externally).\n estimated_time = 0.42\n\n def __init__(self, *args, **kwargs):\n\n self.name = self.__class__.__name__\n if self.items is None:\n self.items = []\n members = [gt for gt in 
[getattr(self, gt) for gt in self.__ordered__ if gt not in ["__classcell__", "__init__"]] if inspect.isclass(gt) and issubclass(gt, QItem)]\n for gt in members:\n self.items.append( (gt, 1) )\n self.items = [(I(question=self), w) for I, w in self.items]\n self.has_called_init_ = False\n\n def init(self):\n # Can be used to set resources relevant for this question instance.\n pass\n\nclass Report():\n title = "report title"\n version = None\n questions = []\n pack_imports = []\n individual_imports = []\n\n def __init__(self, strict=False, payload=None):\n working_directory = os.path.abspath(os.path.dirname(inspect.getfile(type(self))))\n self.wdir, self.name = setup_dir_by_class(self, working_directory)\n self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n import time\n qs = [] # Has to accumulate to new array otherwise the setup/evaluation steps cannot be run in sequence.\n for k, (Q, w) in enumerate(self.questions):\n # print(k, Q)\n start = time.time()\n q = (Q(working_directory=self.wdir), w)\n q[0].t_init = time.time() - start\n # if time.time() -start > 0.2:\n # raise Exception(Q, "Question takes to long to initialize. Use the init() function to set local variables instead")\n # print(time.time()-start)\n qs.append(q)\n self.questions = qs\n # self.questions = [(Q(working_directory=self.wdir),w) for Q,w in self.questions]\n if payload is not None:\n self.set_payload(payload, strict=strict)\n else:\n if os.path.isfile(self.computed_answers_file):\n self.set_payload(cache_read(self.computed_answers_file), strict=strict)\n else:\n s = f"> Warning: The pre-computed answer file, {os.path.abspath(self.computed_answers_file)} is missing. The framework will NOT work as intended. 
Reasons may be a broken local installation."\n if strict:\n raise Exception(s)\n else:\n print(s)\n\n\n def set_payload(self, payloads, strict=False):\n for q, _ in self.questions:\n for item, _ in q.items:\n if q.name not in payloads or item.name not in payloads[q.name]:\n s = f"> Broken resource dictionary submitted to unitgrade for question {q.name} and subquestion {item.name}. Framework will not work."\n if strict:\n raise Exception(s)\n else:\n print(s)\n else:\n item._correct_answer_payload = payloads[q.name][item.name][\'payload\']\n item.estimated_time = payloads[q.name][item.name].get("time", 1) #"[\'time\']\n q.estimated_time = payloads[q.name].get("time", 1)\n if "precomputed" in payloads[q.name][item.name]: # Consider removing later.\n item._precomputed_payload = payloads[q.name][item.name][\'precomputed\']\n try:\n if "title" in payloads[q.name][item.name]:\n item.title = payloads[q.name][item.name][\'title\']\n except Exception as e:\n pass\n print("bad", e)\n self.payloads = payloads\n\n\ndef rm_progress_bar(txt):\n # More robust version. Apparently length of bar can depend on various factors, so check for order of symbols.\n nlines = []\n for l in txt.splitlines():\n pct = l.find("%")\n ql = False\n if pct > 0:\n i = l.find("|", pct+1)\n if i > 0 and l.find("|", i+1) > 0:\n ql = True\n if not ql:\n nlines.append(l)\n return "\\n".join(nlines)\n\ndef extract_numbers(txt):\n # txt = rm_progress_bar(txt)\n numeric_const_pattern = \'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\\d+ ) ?\'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n all = rx.findall(txt)\n all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n if len(all) > 500:\n print(txt)\n raise Exception("unitgrade.unitgrade.py: Warning, many numbers!", len(all))\n return all\n\n\nclass ActiveProgress():\n def __init__(self, t, start=True, title="my progress bar"):\n self.t = t\n self._running = False\n self.title = title\n self.dt = 0.1\n\n self.n = int(np.round(self.t / self.dt))\n # self.pbar = tqdm.tqdm(total=self.n)\n\n\n if start:\n self.start()\n\n def start(self):\n self._running = True\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n\n def terminate(self):\n\n\n self._running = False\n self.thread.join()\n if hasattr(self, \'pbar\') and self.pbar is not None:\n self.pbar.update(1)\n self.pbar.close()\n self.pbar=None\n\n sys.stdout.flush()\n\n def run(self):\n self.pbar = tqdm.tqdm(total=self.n, file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100,\n bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\') # , unit_scale=dt, unit=\'seconds\'):\n\n for _ in range(self.n-1): # Don\'t terminate completely; leave bar at 99% done until terminate.\n if not self._running:\n self.pbar.close()\n self.pbar = None\n break\n\n time.sleep(self.dt)\n self.pbar.update(1)\n\n # if self.pbar is not None:\n # self.pbar.close()\n # self.pbar = None\n # for _ in tqdm.tqdm(range(n), file=sys.stdout, position=0, leave=False, desc=self.title, ncols=100, bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\'): #, unit_scale=dt, unit=\'seconds\'):\n\n\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\n# import unitgrade\n\n# from unitgrade.unitgrade import Hidden\n# import unitgrade as ug\n# import unitgrade.unitgrade as ug\nimport inspect\nimport os\nimport argparse\nimport sys\nimport time\nimport threading # don\'t import Thread bc. 
of minify issue.\nimport tqdm # don\'t do from tqdm import tqdm because of minify-issue\n#from threading import Thread # This import presents a problem for the minify-code compression tool.\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\', action="store_true", help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\', action="store_true", help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\', action="store_true", help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\', action="store_true", help=\'Automatically pass all tests. 
Useful when debugging.\')\n\n# parser.add_argument(\'integers\', metavar=\'N\', type=int, nargs=\'+\',\n# help=\'an integer for the accumulator\')\n# parser.add_argument(\'--sum\', dest=\'accumulate\', action=\'store_const\',\n# const=sum, default=max,\n# help=\'sum the integers (default: find the max)\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False, show_tol_err=False):\n args = parser.parse_args()\n if question is None and args.q is not None:\n question = args.q\n if "." in question:\n question, qitem = [int(v) for v in question.split(".")]\n else:\n question = int(question)\n\n if not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n if unmute is None:\n unmute = args.unmute\n if passall is None:\n passall = args.passall\n\n results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute, qitem=qitem, verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n show_tol_err=show_tol_err)\n\n try: # For registering stats.\n import irlc.lectures\n import xlwings\n from openpyxl import Workbook\n import pandas as pd\n from collections import defaultdict\n dd = defaultdict(lambda: [])\n error_computed = []\n for k1, (q, _) in enumerate(report.questions):\n for k2, (item, _) in enumerate(q.items):\n dd[\'question_index\'].append(k1)\n dd[\'item_index\'].append(k2)\n dd[\'question\'].append(q.name)\n dd[\'item\'].append(item.name)\n dd[\'tol\'].append(0 if not hasattr(item, \'tol\') else item.tol)\n error_computed.append(0 if not hasattr(item, \'error_computed\') else item.error_computed)\n\n qstats = report.wdir + "/" + report.name + ".xlsx"\n\n if os.path.isfile(qstats):\n d_read = pd.read_excel(qstats).to_dict()\n else:\n d_read 
= dict()\n\n for k in range(1000):\n key = \'run_\'+str(k)\n if key in d_read:\n dd[key] = list(d_read[\'run_0\'].values())\n else:\n dd[key] = error_computed\n break\n\n workbook = Workbook()\n worksheet = workbook.active\n for col, key in enumerate(dd.keys()):\n worksheet.cell(row=1, column=col+1).value = key\n for row, item in enumerate(dd[key]):\n worksheet.cell(row=row+2, column=col+1).value = item\n\n workbook.save(qstats)\n workbook.close()\n\n except ModuleNotFoundError as e:\n s = 234\n pass\n\n if question is None:\n print("Provisional evaluation")\n tabulate(table_data)\n table = table_data\n print(tabulate(table))\n print(" ")\n\n fr = inspect.getouterframes(inspect.currentframe())[1].filename\n gfile = os.path.basename(fr)[:-3] + "_grade.py"\n if os.path.exists(gfile):\n print("Note your results have not yet been registered. \\nTo register your results, please run the file:")\n print(">>>", gfile)\n print("In the same manner as you ran this file.")\n return results\n\n\ndef upack(q):\n # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n h = np.asarray(h)\n return h[:,0], h[:,1], h[:,2],\n\n\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False, show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n show_progress_bar=True,\n show_tol_err=False):\n now = datetime.now()\n ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n print(b + " v" + __version__)\n dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n print("Started: " + dt_string)\n s = report.title\n if report.version is not None:\n s += " version " + report.version\n print("Evaluating " + s, "(use --help for options)" if show_help_flag else "")\n print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n table_data = []\n nL = 
80\n t_start = time.time()\n score = {}\n for n, (q, w) in enumerate(report.questions):\n q_hidden = issubclass(q.__class__, Hidden)\n # report.globals = q.globals\n # q.globals = report.globals\n if question is not None and n+1 != question:\n continue\n\n # Don\'t use f format strings.\n q_title_print = "Question %i: %s"%(n+1, q.title)\n print(q_title_print, end="")\n # sys.stdout.flush()\n q.possible = 0\n q.obtained = 0\n q_ = {} # Gather score in this class.\n # Active progress bar.\n\n for j, (item, iw) in enumerate(q.items):\n if qitem is not None and question is not None and item is not None and j+1 != qitem:\n continue\n if not q.has_called_init_:\n start = time.time()\n\n cc = None\n if show_progress_bar:\n cc = ActiveProgress(t=q.estimated_time, title=q_title_print)\n with eval(\'Capturing\')(unmute=unmute): # Clunky import syntax is required bc. of minify issue.\n try:\n q.init() # Initialize the question. Useful for sharing resources.\n except Exception as e:\n if not passall:\n if not silent:\n print(" ")\n print("="*30)\n print(f"When initializing question {q.title} the initialization code threw an error")\n print(e)\n print("The remaining parts of this question will likely fail.")\n print("="*30)\n\n if show_progress_bar:\n cc.terminate()\n sys.stdout.flush()\n print(q_title_print, end="")\n\n q.has_called_init_ = True\n q_time =np.round( time.time()-start, 2)\n\n print(" "* max(0,nL - len(q_title_print) ) + (" (" + str(q_time) + " seconds)" if q_time >= 0.1 else "") ) # if q.name in report.payloads else "")\n print("=" * nL)\n\n item.question = q # Set the parent question instance for later reference.\n item_title_print = ss = "*** q%i.%i) %s"%(n+1, j+1, item.title)\n\n if show_progress_bar:\n cc = ActiveProgress(t=item.estimated_time, title=item_title_print)\n else:\n print(item_title_print + ( \'.\'*max(0, nL-4-len(ss)) ), end="")\n hidden = issubclass(item.__class__, Hidden)\n # if not hidden:\n # print(ss, end="")\n # sys.stdout.flush()\n start 
= time.time()\n (current, possible) = item.get_points(show_expected=show_expected, show_computed=show_computed,unmute=unmute, passall=passall, silent=silent)\n q_[j] = {\'w\': iw, \'possible\': possible, \'obtained\': current, \'hidden\': hidden, \'computed\': str(item._computed_answer), \'title\': item.title}\n tsecs = np.round(time.time()-start, 2)\n if show_progress_bar:\n cc.terminate()\n sys.stdout.flush()\n print(item_title_print + (\'.\' * max(0, nL - 4 - len(ss))), end="")\n\n if not hidden:\n ss = "PASS" if current == possible else "*** FAILED"\n if tsecs >= 0.1:\n ss += " ("+ str(tsecs) + " seconds)"\n print(ss)\n\n ws, possible, obtained = upack(q_)\n possible = int(ws @ possible)\n obtained = int(ws @ obtained)\n obtained = int(myround(int((w * obtained) / possible ))) if possible > 0 else 0\n score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': q_, \'hidden\': q_hidden, \'title\': q.title}\n\n q.obtained = obtained\n q.possible = possible\n\n s1 = f"*** Question q{n+1}"\n s2 = f" {q.obtained}/{w}"\n print(s1 + ("."* (nL-len(s1)-len(s2) )) + s2 )\n print(" ")\n table_data.append([f"Question q{n+1}", f"{q.obtained}/{w}"])\n\n ws, possible, obtained = upack(score)\n possible = int( msum(possible) )\n obtained = int( msum(obtained) ) # Cast to python int\n report.possible = possible\n report.obtained = obtained\n now = datetime.now()\n dt_string = now.strftime("%H:%M:%S")\n\n dt = int(time.time()-t_start)\n minutes = dt//60\n seconds = dt - minutes*60\n plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n print(f"Completed: "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")")\n\n table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n results = {\'total\': (obtained, possible), \'details\': score}\n return results, table_data\n\n\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport inspect\nimport json\nimport os\nimport bz2\nimport pickle\nimport 
os\n\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n with getattr(bz2, \'open\')(token, "wt") as f:\n f.write(json_str)\n\ndef gather_imports(imp):\n resources = {}\n m = imp\n # for m in pack_imports:\n # print(f"*** {m.__name__}")\n f = m.__file__\n # dn = os.path.dirname(f)\n top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n # top_package = os.path.dirname(__import__(m.__name__.split(\'.\')[0]).__file__)\n top_package = os.path.dirname(top_package)\n\n if f.endswith("__init__.py"):\n for root, dirs, files in os.walk(os.path.dirname(f)):\n for file in files:\n if file.endswith(".py"):\n # print(file)\n # print()\n v = os.path.relpath(os.path.join(root, file), top_package)\n with open(os.path.join(root, file), \'r\') as ff:\n resources[v] = ff.read()\n else:\n v = os.path.relpath(f, top_package)\n with open(f, \'r\') as ff:\n resources[v] = ff.read()\n return resources\n\n\ndef gather_upload_to_campusnet(report, output_dir=None):\n n = 80\n results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True)\n print(" ")\n print("="*n)\n print("Final evaluation")\n print(tabulate(table_data))\n # also load the source code of missing files...\n\n if len(report.individual_imports) > 0:\n print("By uploading the .token file, you verify the files:")\n for m in report.individual_imports:\n print(">", m.__file__)\n print("Are created/modified individually by you in agreement with DTUs exam rules")\n report.pack_imports += report.individual_imports\n\n sources = {}\n if len(report.pack_imports) > 0:\n print("Including files in upload...")\n for m in report.pack_imports:\n nimp = gather_imports(m)\n if len([k for k in nimp if k not in sources]) > 0:\n print(f"*** {m.__name__}")\n sources = {**sources, **nimp}\n results[\'sources\'] = sources\n\n json_str = json.dumps(results, indent=4)\n\n if output_dir is None:\n output_dir = os.getcwd()\n\n payload_out_base = 
report.__class__.__name__ + "_handin"\n\n obtain, possible = results[\'total\']\n vstring = "_v"+report.version if report.version is not None else ""\n\n token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n token = os.path.join(output_dir, token)\n bzwrite(json_str, token)\n\n print(" ")\n print("To get credit for your results, please upload the single file: ")\n print(">", token)\n print("To campusnet without any modifications.")\n\ndef source_instantiate(name, report1_source, payload):\n eval("exec")(report1_source, globals())\n pl = pickle.loads(bytes.fromhex(payload))\n report = eval(name)(payload=pl, strict=True)\n # report.set_payload(pl)\n return report\n\n\n__version__ = "0.1.8"\n\nfrom cs101courseware_example import homework1\n\nclass ListReversalQuestion(QuestionGroup):\n title = "Reversal of list"\n\n class ListReversalItem(QPrintItem):\n l = [1, 3, 5, 1, 610]\n def compute_answer_print(self):\n from cs101courseware_example.homework1 import reverse_list\n return reverse_list(self.l)\n\n class ListReversalWordsItem(ListReversalItem):\n l = ["hello", "world", "summer", "dog"]\n\nclass LinearRegressionQuestion(QuestionGroup):\n title = "Linear regression and Boston dataset"\n class CoefficientsItem(QPrintItem):\n testfun = QPrintItem.assertL2\n tol = 0.03\n\n def compute_answer_print(self):\n from cs101courseware_example.homework1 import boston_linear\n boston_linear()\n\n def process_output(self, res, txt, numbers):\n return numbers[:-1]\n\n class RMSEItem(CoefficientsItem):\n def process_output(self, res, txt, numbers):\n return numbers[-1]\n\nclass Report1(Report):\n title = "CS 101 Report 1"\n questions = [(ListReversalQuestion, 5), (LinearRegressionQuestion, 13)]\n pack_imports = [homework1] # Include this file in .token file'
iFlPxdyCqBrgMNDzVoEA='80049512020000000000007d94288c144c697374526576657273616c5175657374696f6e947d94288c0474696d65944700000000000000008c104c697374526576657273616c4974656d947d94288c077061796c6f6164945d94284d62024b014b054b034b01658c0b707265636f6d7075746564944e68034700000000000000008c057469746c65948c104c697374526576657273616c4974656d94758c154c697374526576657273616c576f7264734974656d947d942868065d94288c03646f67948c0673756d6d6572948c05776f726c64948c0568656c6c6f946568084e680347000000000000000068098c154c697374526576657273616c576f7264734974656d9475758c184c696e65617252656772657373696f6e5175657374696f6e947d942868034700000000000000008c10436f656666696369656e74734974656d947d942868065d942847bfbbad207494a76c473fa7c5437cbda6fb473f951ff08b42e9b547400582027fe20d7c47c031c19fcb0c026d47400e7c9dd6eb08cd473f47049a406460ce47bff79a05a5307b9e473fd396c01163bbcd47bf8944115fa064ea47bfee7c69c063070b473f8318255bc9455447bfe0ca713e6cde616568084e6803473f8f2b200000000068098c10436f656666696369656e74734974656d94758c08524d53454974656d947d94286806474012b794bed3e1f168084e6803473f9ec3e00000000068098c08524d53454974656d947575752e'
iFlPxdyCqBrgMNDzVoEu="Report1"
iFlPxdyCqBrgMNDzVofu=iFlPxdyCqBrgMNDzVoEH(iFlPxdyCqBrgMNDzVoEu,iFlPxdyCqBrgMNDzVoEL,iFlPxdyCqBrgMNDzVoEA)
iFlPxdyCqBrgMNDzVoEY=iFlPxdyCqBrgMNDzVocw.dirname(__file__)
iFlPxdyCqBrgMNDzVoEe(iFlPxdyCqBrgMNDzVofu,iFlPxdyCqBrgMNDzVoEY)