diff --git a/devel/example_devel/instructor/cache.db b/devel/example_devel/instructor/cache.db
new file mode 100644
index 0000000000000000000000000000000000000000..eba28aab5e607cfee36521a00079738cc07361f5
Binary files /dev/null and b/devel/example_devel/instructor/cache.db differ
diff --git a/devel/example_devel/instructor/cache.db-shm b/devel/example_devel/instructor/cache.db-shm
new file mode 100644
index 0000000000000000000000000000000000000000..e7762b285bdaf5d8ee34b76d323fd18c34594666
Binary files /dev/null and b/devel/example_devel/instructor/cache.db-shm differ
diff --git a/devel/example_devel/instructor/cache.db-wal b/devel/example_devel/instructor/cache.db-wal
new file mode 100644
index 0000000000000000000000000000000000000000..34e446fccbfd0910c31bfcdac9d965261a56f7d5
Binary files /dev/null and b/devel/example_devel/instructor/cache.db-wal differ
diff --git a/devel/example_devel/instructor/cs108/Report2_handin_25_of_30.token b/devel/example_devel/instructor/cs108/Report2_handin_25_of_30.token
new file mode 100644
index 0000000000000000000000000000000000000000..0eca422f340b3fa213d79b1c8be756f7d8b644e4
--- /dev/null
+++ b/devel/example_devel/instructor/cs108/Report2_handin_25_of_30.token
@@ -0,0 +1,323 @@
+# This file contains your results. Do not edit its content. Simply upload it as it is.
+---------------------------------------------------------------------- ..ooO0Ooo.. ----------------------------------------------------------------------
+9b19cbb5fd7bafb60a8378936c7aa5d819133e2d88245e8819be1cb11e123e3d755f690b9056ffc949c3f08e010c60cb2f34128490a3c03b80a3e2699413ca05 57304
+---------------------------------------------------------------------- ..ooO0Ooo.. ----------------------------------------------------------------------
+./Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4iHhp51dAEABDngw70dphHsyxOaCNjHqyL9ETBeRAWZCf/2XMOuvc8ic41jAX4aZ/TW6DJgCa83BInKLMpl63Az39W7ALhbdmcxbZx2PSCEEWpP/6tFBDqcraY3x/6w9rQgvZu6Y45R/zOG7mSP
+T6BrAV0PBVTVOuqNF4vFdpSAso2WeoP3vUHLDxG6ObHnpfo9TnEDPE6UcW6LmnlGls2WpZpKgscyg1tf6JxkCO6YO+HQLmc9iun9O5JBGyCaqUItWz5xHigY3149qVz8eZ2H+CvUr9w2eoq/pMWWkLaH+neax+BuN3eRX3vtK5GjjdrcEWtj
+gzmQk3nXrriqmsGgbnSZEqX9bpKyX8VeKaR+osXCiuTNb0TR4/99oKqpm+ihVZo45PZcZQULXUr8fntwyEItCJVhFmapArVJ5zH3DD22lr6n8I/1/IGXiuruY38PzWDE0H85NPV2cGtMK/uYdzxQ3nwGyrIJRfu7LppZyrTZ/R7TE/SAOuuG
+CcuhbmWpk10kCRA6ztaltCnKSjtQLLsy/JgetHKUJC4Cveup5WOwzNC2dPqLzcAJGzVf4WpBGP5EoFxG25AVj0mqN+qgpOeNwDia3iL1aU+rDDomAncwqQyeUU9r7o80c7fWcPxNcCuJNO98qqoF6SVoNOGqrNiE+uDe0/RX3vwuw+MoXiOB
+TSIYZnH2xYlHaotkkFVIJGTS31s6d3raCVQMCS97wyEb32/6YL8hNsRdspzDPBF2jHNVLwj7+j4ACLqQNkBANxcmzHzUUSafe1bfNI/HzrCAWRcqdG+yfwykpH48DC3FDAA7g+ivnFD+NJUVYAdbOa0pZRxAt5dm4qI3ZKqxpV3V8phuqYVj
+KiIS+VvJfaPcccHAdrWCOIArsh56022Y7ZMvpunqKY5IHiIxqalLWybDzzFqrB6ED6wWNnrejMbQTv0qGHCphcBakIryIbbTv1jcCP3AFD4nfMjYlv0HMcd2oz6mGyJQA6TNctRC1ckO08HI1PYjHjneswJ+LJ5tl/EMcl20U1sUYBLNUG2d
+otMtSIXL38B72UEV/mkFqklgcLKMkp5JQYSQiwaPrln/rrBYtoTCMrykqt2ix8dAt/O514cfe21X+Fd51LufE23eliDRhz0XhChZ5XnTa+/XYAEv5oGjcg3KyUGcz/RzM4PAFUCByBlKSUFk9lhgelZyOL4SJ9ShP1i2pgjc50aqegeBTSdS
+VZiqrORddFrd6VfyVU3SxbLpDZFQgc5SAwFds3EjF+PJx0lxikMRM1OQNWjlQaFeZQmOwR5TdapD10xVN00X9VxhBVrRL/m2Msr88JdmBZObX/dMexhQd/FN3lzezBUjcW/FWPSf9FR5QOf3nVAs1WS483p8H7LlG4UFJOsl1XN+RhilNsoy
+riDVIWKtVy79sXMdQgTNFLEquF0wMzweczMiz9Dd+WEP3YSJntMJhfZXLm/CtMu6IgUJ1zifWYsPvA0DzeseTioMey50emcl+EaFDGgB1ZTGuuLCADzpWaMYJ4X4Uk6ZaLat6wSO13PRSe+2zMWLoSqfKWZjUV2PvLhwVJ6iZLiv6QnHOPcP
+NLagSMmBv7Td0s7s6gjrMnpSnsSsaHxNhHqbGGlTgjOI4ciCBifpnus2FiFnT9fisbvUkUQOKsxjDRGuGE729DzWbmak+84LtRznDAy36D84SSg5QPIc0nX/+lL7ahh5AOqoFRfZ+13EHB1Uf82ATKgLyV/3xmykYYrFlOE2/bzFcVCi82yb
+sZ1uKXCd/E3Stb7keGaYlQm5fIy5ihhLzjYuARtUN8MCFyOa/MW5QAz3aE44qIMAdR9I7tavhFIXwi/4Yq+0tIgWVvYdf7YCVL4enBxxqUHjjCIuF/5nQt5JDtoVWvqSrIMFnI/gR3Z/Tx+wHt1tebZImdf0b2l86QXXQW3K6oYhe9gXC7ve
+GBNWjaXNMoosqBHNjmduwzic1iXdQZKOawX0gwidxg7Ynmae8I6Zohb+zkoKeN+nfqTf3zcElYD5nk8DX4JJmE+F4m0U9yZgltUPsuG/JMolBL7OZG9WspHjGHfztOAd4qGlcmvZsPmGv2IN8KPcILnegndFjCBvOHASw2KzsEdVXLVBkb7C
+iZeDYSBmhq3soB1wHMu/JPuw9EhARvtlq8INGHwgewdcONa/eUieIWaly2ueBI3AsJLJ0U5Os2+erw9igHY6RkNknT4TsU5uLMe0ikJRfATsBMlZTwMBomGO62xGdx0K+VsurA12mRfQDikm0P5eW5rugQXROr6NXpZmkKPTMWnpNoiqJVDs
+okKdA6osZL8WzlN4uRlwn+pwSJleZ/51/ELkjtJc6E9lcjOUdLp6EfcOrHpvtfQV3d3FBIEyIDk9VzRohbelzZ5LdXRpZNDen9qVctbVXl7700JSZwdt4Q7wuyoxxVdQS+5Ts/sSBIqGLOCvvokKRHCWw8v48p7UnnlINdrEjDe8zIXGOsNP
+CxK1OwV/96AyBGv1hJ5WvdAeLiV4N+zVI7wjFlIAyOfuZ/OlSdqmu1N3Ws8qBLAVxxaNpxv4E++EuyudkITWRsXvPVKXDp6wgCD8mWsitH40QU4/YrJlp6pWieXAqSLaMBue1lBA6Ac7cpg9AfCsMEWM8fl6LyLtPlvMEqigESY1qElX3NHo
+/cCtdDsX6d6I9Lft1muwQDYiKyU0pfdru77zHSFZSfXcTXjU7+pF+EnR6cbitjR2R4gveelUApt63dsnhQve0pxCCysPVy13LIIk8pNfqe4uXgypF+JuVXhjp3SiVD9i+jt3HAD3tCVV5Spl1h/jigYF2KbLQWK8CxxD9rK/Le8ilUBfy/pL
+LoHvP50NSF4wPeZKGDyyhwGYVqUgljMfiare4gWLSRHFZkUqftkX0lIAHdjOn8nDo0ucwJnE7c05Ey3lPhvmWmgrsNBeyFekteaGW2J++o54CR5kAiMcT+hSm7tbGVfDOBSWcpMpdL2Aa/DQ3ZVEIjFldcLmc48ceLF8BZVMpNAhBZzz/Otq
+qparYO7FGKxAKF0j332LzoFJKJk/4P4zUBkVywyGulEyyR8uD3VEiQw7LWWrOpdQf+ySnY2JePTplbLKItjY42fIEJPZCjkI+1M1ufeAwE0pZ8n2a/nFZJJCDRnaK1WiDGk18PpmfujCE3t3SeApFBNJHm8z53lSZ5n+ZKWFV5HoOuqB25WM
+AHied/oT5aAQJw9PW0z640LtpyjKyapa3m4qGBKkm4AELPbHitYoL1PhM4G3HYEBmTAyaMnpjzeje8BR5jZVnoWUvcRvZ5DBETc3cgOdIbzH+AdnDty1cYe7a9KfKlm9qLUx77rMcJZBU8Dk4JwxiT6almB/KrzErNoBn3uVeZBqEHkhy0eb
+TVDfzxcCEN5P1uSm2jTr0aI0E/Su6aFoBCfkqkZdcfZQAZ8XmKKpUIqsiv+yc0cWg97ZzW1vDeKj+jmuPYH4H6wEmd6usHj8a223f4j6Cl8JpYQc8OxxPL0iPjnGl0utXnJeNPOCfvpty+j1df7uyV9TvR3ph2yM370UdA06TCfjprbabrAL
+RjpWBK9NKI1clNqg9eA/nnN7lwbVKp20+K3ftABB4fOwHpPi5lIYJg44LsbpGbH7HPuMCFIqS0JK7HbQ+xvt927fPq+D7RiaIbHNZJxdVCHohqdev2uoIwkoXwQzEVWiNeo1X4nOk6lnZr7nR9e21Ah6PVjvHKnXzeMtxKbHyWgeAbbJBWTP
+fBOnTQUk0BvJoO4MjGN42xn4Bcbrv9re6UBNLlm5Xth5/qNHbrr7YE6gyguTMYP+5/V5fvwJwkam1fWfLbBEX0KpliPexrbucYP8ZlzbWeRkKQ4y7X9ICVQmKttPMs63WpO/lHhvoELcJSBAvJ2OyAuKZUVztPYq8SkeFZzbyDR5MFgg0KHa
+9ZmbYr04eSqXXqQ2BxKHVIozgeS344CDlWovRn9oun/5Y1B6kGUtcKbKIQuBr2BGIp1e/43n4MyjpKT9eZXx0wBj07e5nZn9cIXpeZ1HdYorDq1ZdVurEhuj9NKRejm4Zt6ImSzseBZXsb7KFB9IArCRROWIZ8BQzBSkMzOgxKGAbMSyJb+y
+cxTEc7QoA8fk8EAXssO1UexQU8ADNVRM3vtL9LICJ24dzqSGyoPurvvNrGAFXbPWgJ0t32coxRWv8Jk0giNqLUZULScb1QF9ypaFRlzmzpFmAqITrp/xrXfsT5p/m7412WAGopizkKxs7J8KDEFzFSkGqCEa9Abwe6NVPE2p5L1PjiKDGcLW
+nvKTD3IRZZwVdOP6forBoCL+49OemsEA8GgpJASsVBPhDDz1LI3bHzy+M6bwyJpJRK1jXWm3Evpjhjf8IMaA2wf4XiKaxiHtQ1a1MhS2ddRiQM+/GLrtHVGxlPD4wOaBNMi4evqntwmTDxNbcp1FpB8vHmxn4PovmIqvW41ZTgNWEPxQPvCQ
+tSb14XkDQudFlnRGidJkTX63yaX/RJQw1fkQwnBQ/BqLKwWkBIRvB8Zbubd8SH+MgdfXSzCmFElyjzAg/Vm7dJNz9N8aD/WutNeAXBpOmG30R+H3sL1x8NALyfsO/JADsZJfEtOE3CvQHONgi4z7z+0M6byNUpAaKcLiNMuMl+zb5llIcg4a
+fnRZHDWZ8DZi7OJ2CPZZGSLeUOYUzxLZnA8TEHKbFrjIH7JsYfgyBNmgtlCI9ZW68L4f2ikANpGITideSCkCknSK7h984XrunhI6GPq8WqHYHiF++COiqlO4NXoCGJJVdEJVsC3+G8YsO1vYVp2Q0V4YRRQfMjCyXA0yvYMTWvtaikx7W5G/
+tV+QFIFajnqelhtxqeOa9K8K3BD9yY7k9f5IILfhnMh9mnQYl14rFzLB4T6NCue3I9F62MELfz07SDrCKIG4sQkWc3AZeCqeOVcnQsi5D8WI1MAyKV24NBx0vSs2d2LjoIDRrEbih5x7/ba+ghp8V+z13ZWSYZ3DyJYEgsxtZUB38eOPBNzl
+vvTA7QQbxd3GUcgEQ/h8ucaDNfEWYbIkLGcfzTCZl8MJCXsUOooqhW3Bw0PVJryQFdUugH9zzKSQrRVKyW+GUTaW4TfLfB105bOqqYQezz4JDXG5qi8JIWxtiVBFUftCsVvWttRw0IklHGUFhqciIkvgeLsU2Qb55Qs3Q9fKOtmJRAw3IXvp
+4dwCCNOex9YhICnXhV2UpeIIpPotTVsx67kdaXAvOcmuY07Bc1vr1aTFFI5G6AM9yMQr+mBi7RpBSWAKTUQdArwGpb10YXwSw+iPP6Z9q1QjdJuUHoweoF18/yboYhsPoHZIJTCx2czzJ7DSm80PHj00ni0XANdq7T0slaoOFJq6YusTIn/j
+QnLOlK0U4+5j9l1tRvkA9nUmGgrE1X+0hbauhah63SGjGf+QaClGErJLfFWOQepfAnXvjmMEA0+8WLL0HMSlW2J45j+aIL1gRJsGqeHGfqJ+FSRgz0vozguQifePk1bPl91UWaMbjMWIY3Hn4x7UyoJrQbVHBcjNJIbveJ8yX9pLmjonCo+Y
+JYy7xeym5bpBgmXXIBYpJ27uvQF8n5KLOZ8y4taGW0C6/J24EGyzGVwr30EW37/YcC+ZrNq1eIlg0ZbVUlfQm6X68PPBqA7Q12chzYxowE4p+U3A1dhUl8r1Py1XEBpILpb8kvz/YMZV26VNDQ0n2ABbAiJD6EkKQxK2Lk9afKfZIA7AitPG
+h4664lrXdEcR7kun8MwgKoTL+W6pTIyE4zpsGRmkRFqhcjn0DEGUHf2pkbIhA23ICuLX7D/I8pUIDeEpuGd4towA82unuTTWfUSSNG8wqWRSmw/jFXrpMAcN7G2MTd57rUmBlhJEu/YCJSPeqx/Jnl0jIludFcRZJ2RWq49F1bU8ougYxpPl
+c2G8l2xFSwBFyATmVRkpKQ9lVGedBTg7BMQ8Ir/mjKdNNGqKhYSf8JZPneqokdEYnEOtdGGzpdvF2g1lyX2rFsSs62xG/Hjou7nLiCFx2T+VTi3YGs5rhkxwCkT3M7tVF08W3znq6N08uD7sNFwjHf0WyZEN2hVQt0e8fOL/r+mvHfwnI4rc
+bUoRPqnPvNbqdXd2xHhqaESlkXJiO/d1yyg90Y0YQm4yHTVHdYpar84O2UC/soU0/Yf8fKVIYJbwQbz+H1NZrxn1sBbRIX4vz4FB04WPGXNxHcrz3L9Ic3an32naHV0vCi5rICMfl5+hBuXH49njp6dU/+0JwKNIgpyfQpdJqu7xRnc26bBM
+hrm1lAGG++03WKZZniwrdjudNS1HN4hIp3dPF0LxNM1+7eFxq+HAjxlQI5y0VVuCVHtNNMxwByGFMsJBTqfO3zolp3dBziPS1ypEXCskVqKt1eTEX/l74iJrRQbtuOQakIS6ooMXNizLlwbaGg0GyUgAjPEroI5XIX1G0zXxTV0mK3hq4TmE
+n/2kWBwOSRfkerjFyWjrXLdoEtWFF+ROqhzesIOtMGgK4i83xmYwTVoHWknJgplP0OtJHCgKNKGOmCl/+fdpl5IdL+YgZSQ+HnDx/SmRAysmTXwZx54c7Q0Z0UPnaMZOErxLoB7tN1mWCpNZJQZnd/qv834pr0N0p2zLCuNTCbqCRDYiie7u
+N59A4sa4dXCiewL4WATnWNEk/SBdzobNLhByqUjfY4fH52AmslA4a8Hig++bwZxmPCeBHWjSrMBWhZGGJO39i4CDbkZeiksv4unEB1VjDij2cuucrcwVW7XdpLNzyDcXFvHdjLg3vuLq3Cc/wPBt8U1+c9wQQrvd5amBhmA7nzTC1qudvQa+
+5bEFujNfBjs+TJbH4VmXgryobcuHyQcWCsnQrwYOcB6yjJzA+PvIC3iNiOR9Z+3pBv6ackMvBsJyC7EzaKkSAtYj5Y+tK+khbSdpqUQB11kugiJYmCIRKpSn0AD2afB/NWf9wH6EGZvDaHpgdaSYe9x/5QK5TMUZQEH48/fw7yTWP+xcI94A
+bRz6BjltYb2LrrO54qKzldIsqgRzhC5/sKwG4REEJnjRBjZmpDlbP6epCUVKEsjCNuTU+/5GiJqZX+C7xmxwDCQp1tB/tPCI1IFot+Fsi3ofgh941HhxEI3jgiuYqYhkkx+g79XrVrVaapYerFPLCn9XPFWJvrW5ck5HJ6dJOFoLbINn/sTW
+7c82czWsb5RhezJxkEijYjXSjLH9p4C6PZ0pCi3KgtQtTHauryXWOguTi0CG54VOEaAZWcPIMPA2xLLMWwigWrEZ3umIuVO0AcyzwWeeaCarqOh50Tr2f2xdtYkD4ePrqGzBUGZIjcB3ku9qrYuXGD/Pg5FJ1j3w/ouRhm5RBfqH5tkB3qJm
+ky47mH4UVZSYM0VYkhjYcmKt9cjnOlHJ6nnJ/eiqphqLdOZpNGu3hK5g7pBuJ21If5aqRH8elnj61jH4icnQG1m3adi+jyj+KLJerQkSJZMEQfUdJ0l3vfBg8iWE4orj2lCVfG7jKqIl0Lpl2IGnlyczQkACy3pgZPA6+O1B6uJrrcfodiBq
+bVJ9G5XwQQW7BUkNPWhcIVv3lVsmhA3+p9shxSyHJe8yu3iKPw+58QWe1CsfPeRPUBClkxgVAkmaQoFrvlZNw7Xsg/5IXrxpavRc/oz30o+096H304XmgxdwG0/7BvJMhuZvyvf733hbWhBov3UTSqgUDdOCVNX0c5D0V4kYV3gcc40pBvrZ
+WUwK4fyNYNzrbkKPm2loyo/wWLQy/RffuAPwstwqn55XAOWoEWAW7YmfEg/NRTaCjJSt5xSmAMHxEtznEbbXrYiVOlu7Qiu/uH/FV5lJCO7vxw47U/qCPOlSqPH0LbdI3GgWug0L4gDIqxxcSmstohjDi2/uU650NvKyIbi/zY+D4tbsRS9z
+QiCSQJWy9l+ibZVpeUauqjqFyiJFknyRl9/9Nwa2szWI6qBE8pZEvzCmSWnLjlpoHQg/ndnmcYgBijECjEkDyZZ6MMNrEOpKAASS+/hQizPp84aoUH7Y2lIesV1ozGK//dpVCu8s68hHw+ZyVxNEhW7iyoAThsmPQkLR9hAno+d0vsU8EtoK
+t1uBOdvY6gNxRGpcy2B62FZ4o4x/LBsh7DLkusSdvlfzrhzZLa8JAYGDRWII9nQmlEm5h14PoK8bpNnO2zNeuQh8+QqSMGIGihFCu/0KwOZDP5BL82CwTrOheKsXtbhD7tbUPYLNsejiHuRCtBruiVEp4Oje5cZJQivxHw08WvvUuOcjWpd6
+Nvi6rouhdHSx4ubWpSkkdchkjDfiA4LKLZZvEunGdSpOD9VqVuc620US8lTxNC+grVGvZOmzHpQaxaAeD65RBedP54uNspqCbcoqZYPh+wIcmM7nop/fswjP81faoK6b4w+wDVbZLtx0Dy/2iLIeVH7zk8UzsySyHotQYdh8n3LATdhFAHiS
+iQ2NRNLyapclWbGJRknor3+TIaLQrGi0b7tuX6XSSoB2ygjYNt9Ky1RlToOHWqieXXSj/WiDHhmLgPfC3/SQBJ+1BoZY6W7idyH5rA564Zr6D5P79u67cW3SvIZUvpvvArcKnn3qIuAolExsVAj8mXPuZgdmu9mvIhbx7Y2ItErvTCd5ht6J
+1Yl4fYK8LgS5DHKnhMFIvuQlA9STwBrUknLWzae8K13PwHzTI48dBtbpV+FqKLBqcGZauomGeeP1Ra7x1+Br7cYzL6FIdhEB3JqfEM7eIAvmqPumLzE8ub3hGhJEU1/BCMXmKLZeQgvGLSSIMGxagrkgLBPmivwzcJSY94F80NOliYrp+HUc
+n/bsYmgkcs8L5FjSY5K03J0Q/qjNO7jLPe+TgEN+zdwb76EUhGkhfPBjXP8qm9TdxVNv/eMy0sMBAxYj1r9smv2po5eLSLlCjDt2g4kvuCQkLu6o3fJSXqPj3vgBLoJsCqI/2+WQePT8VEsSn4HfioehzGpdVh85AZOmt/9LEVId/eJQJcWo
+xAKJ8RArY2Mk3MQFY6qXsUFqVp/CvTSoCB9DCWdODX+pmoh8F7ZpxCO0Zt3ZlOBXNA5rbo0SFEXPtE0ST9ubfnRRxblQvbCyDss0oeWH5m2/85ndqSZ/wRIghLNewSJJTrc5kG7SGc4KU22JNaq2NMz7bGfiKlMxOl09JJt1Aqe+/I7SRLCC
+0m/s5LIIJtKyzBriIa68w8iV8iJN2eBXDTIrPwJ746YMTOG+VM4voWIqKijZ3egTvUeq0YxJKtxdyiXQIB4mZK0fw751PF+P6c5/+rcifhtMHLGLEHu2a0b4MTljezXy9Xsnem7sxIcTrGLhNVs0ZzM0bpFsjnMvCSl3ng+BGJVoeCNm6Ics
+U6o6nl+L8LFxuwXbUn2hurHChZ5LtP0A8G/c7AOqfQ+shcr2I0JkkF4DSjy79s7MAAmp7CSSzXRVxLaNdYz30pqFrGLDvptuDouX4ouknncormzhcEKN5NqIVfCIztw/lC2q3pt/vXoEt02HyzGmDv9TARcJNQWe4eBxdEj3GMdhHNmD0A09
+DWLP3D3AfAoqQujIZwB2n4HcBkdXMSBkPT6w0jTc8dWSNiy29aC5pHv+r673QOGNPkSOpVfJVwbdHwZ3sdoMHZyPPXbPgK0yhF4/TLLVUGZjeHDVjTfDai3PFQWXTBMwSTsv+k4L+ygR1EA8QWp+mmCjy8JAEtGUPLLGvfPwrWNuvAHkUB0L
+kMAAfvDZsRIvAon0ZR3hYLmV6qqY/ODkM4mLSLPTKDl63YTnUqIwUiPHycjv1aFfpFfleuJ00DJH1j/G4yRBN+E3FHyCZgnAAU9yq6Jr1gGxUy6RQfvHzQ4Ss47fQ5+ULVotg+qwT4Vi7KNrepEYjoRK3ZMHvMjjS9J+wcGx9724wwN9CIiu
+orfAbzg7df6t1Bou7Vczc3jmwVWgXJXkZn86ugU4oiTIlaJI4DUhkpBLcpj0Z8Psb3Q8w4sy2uLLUQZG9wZzTdyGcdroXaYwNORjeLZz1UAPJJJsJTOl7PP7XU9CLPIXMMsBD7gzrcKVzMhLUqlMke81If7sXxnSTMzbtKBIDDmm2Foao51V
+PUV0tuLtF+zPxQLcaFT5Rk50tZNy+rIBfSlGOwOBPnzaNZ3K+D7GqFAk2YQ5pZQa0UaW3sXVp1ypu0nESqtlq7TXin8BEquxQO++siHsHR2r/R4M6UMhRPEV8jAuLFPIAUr9LPLOteQcR0x9q4PKLKc2gm3qg8CLYKwj7ebFAoJpx9JKzkwR
+ry/e/d0/GI2c8EMsi2PWh6qNHjQFNj7AeswKnpldDoQgV5hY5ZI3m0+GVpKcBGSEqpTuRVM4cSULr4tbVSkGn5vEFzOiQSFUZHgedUF2TU9nLFN10ILqJsESFvbEoOF4+MVWyziX6MWIuSbRAS1c2HFMPAtlzXYswHIGjNOyV6Gzn7pxVAap
+LV8m7IurQ9lu+RuvEJYB3joXh8QXYkW5jq3v0Byix+BvTOZzavAYV56jcfaXA2f4/w+RoDffxb6W44WK7McsS64W8nyTxv3csNOnK7OGTbiGbVh/jIh37nNon0sj7SOYslqtwaFWwarekH7JctcZ3MgtWiePNRjJHjVaBzmeVtmFg8OYQ6hX
+paZmm/PrGikoZp76+c6TmpBFoa0N+4ei1pfyJW1vE5U9BjbLNASRHlO6ekiKT1ACa04cJJuGOdFZcUd1TJdDgwLSkEilM8XAV0EHkd5mh1bB2GP6pzQroAfEevNWW0CcnlUznYjh0aZK4HDY2BGLhbYc0vE2ei3CIGwqf0Joi1h75r7X2Kcy
+LSyyXLk1rMXt6KWPmR64/iXGIODHI6KCti7St+NmBbEj79/JXwy6PP8kdBpB5VfhRwdiLWPeggSUuAAQvTTJ7oBIBPXpj9NTxEd/wN55N5Jtq9JB0CZuoIKtKZ18CH4oVtcI33UqGoFn5AZgzPa3BDc2Orso87e+bYlPLY5mtzaDAVXpz1dy
+cZ1TJK9wOR2LS2UldkHwmwxOGrYjWdOTKNuf0IHhOavUeeN8lNbd/IvDPAjSPbJ8ck8iu4b/TFNkLeAl0PW3CbUA9nOWrRlCRRRmyBNXP0KxLHLuujUpag9PK+9dYPJd6QwbPGrBPIQDHvemHmVj5t2VWWusC1T4mw3zyDICY9oTpFecDMKp
+l1mMPkENiGXqi6fsO0qK7blZ7+34aTm2IWUIW4Dwq2o5Yihb7cA7KBACvAAsPgQDtWUXeW2r8aKJMtwhSFgAbKYDNIJ23VSybmshyFEo9dyPI8W0eZo/Cl/I6ZkU5OtDrOLL2tRbSI0R/8v4faYWxoicWAzQ8SScNmFP2CUtN6ICZ4dqxSD1
+KnMATM30iL+0H9QpzErP1wt6/NfX+Uk8NEDYY1TGhFONs6WhdAl6TgwexBwuMe/ZubfhMiyWSsrJ2G5dglLrucE7dGQlSygSLLanTq55y4Ed8+L1je3nW5B1E4B0MOQ9my5kXsANW8ecGH3sRPIgcj3/ykrLGsT5R+vnihbYXAmGcDB45g5L
+yTUGd5xYG8Sy2AUOGNmUM3HXnByvxrl25Iq+LsL8KS28FIHw7msyx7WyZIawwerRm2Ctr/zGht1MEsXLLPs/48H2NPEc3gDwk/B+1DLCyhv64Ku65pg5uMKyme8kyMRiYgWi/RVTPQiEAm/HSXF1XbhEbERqWq0OVjhLb0Sie3A+zYBc7iLf
+ag5Kx5NKZq3fYhHQHkgR2dlzH3RNbZ/zN2tgRPQje5PauCNHXbd6nb1cjH0mH5vH6seuRLbFGU73a81lHSOToS7ZcQkrhYqLJHP3T8ZUkn4VJKHkWhvB/1t61v39RHnkLqXoEz7a66JEdcygfJCuH1gzjON6mO5x+YlFNvScWBUfYSZFU7rO
+rvpUOBk9vxJb8n4ALt3z+IADhMroHXgVfUuW7Psu/uXJHpV7iYLyi3ymDtOsefHDlXJdNWdzPeCmcprScj7R6BgS0V8J7YRa3PzQ80/uLn8RQMHly+tZcbJX9WZYGg0jRKfLxng9XPUoirszvoFa3Ggnzoxh3d2NOs7oJ0xGHN9mjjqGBxib
+Jc/pjWUrdSAuAzwaegMvzwK71LQvpVmBRXOFLQX/5QvOxsxC7txnVp2l0gxJYab3JJomoHDosXIUAsp7+SUyfWFuz6OWDcCBL3Aq/Jfhx8StlDKB9WuWNAMrlf4bVRt90GpofcDxEFSlJ95wx+a43UjogOLXsrjXatTOpyNhGvxrxcEzRPvA
+SxZVwW2WuBpZ0x3vzFH7a3VWPz8Usw/N1/779VaQ2Igo37Nyk1+VYTV7q8bY2R5voEVHxgYA2P6BrwVgiJ4pUQ33i8VuD2P0ciY1KOsDWYcY/w8B6IVChk/+idzGKAE4wOjzQ4e8rxxLGYHGY/PTlRxujU10zQwxvq8IDOFAGIMr6gewNrAN
+2WP6zKD7/oXR4lu/+dxt0WBd4l/8RUtqie1c5TmuXtw3n+LPG8RQEvMeGB1zbPg2sjtt+Me7+gjiL+yz+++ufjtAHt04+SAMugbCYwmgXqf9tAPUcR8+3muOP6eM1J1QY16OprSPgCD68qn3G8D78EJxhQq2clAl3Tj8OmJlKRP5ZHK2cKlZ
+mQEDZSeYNQOjGk2QO/JsfYwlTpz7TwFxGnM4dWe708yfxe4Tc7P2CKi0kGbpT8SH0Xoz2gWsUGxFmZ0biFdNtfMfwsne5urZ0JIMRyAj40fmBjZhboyojrYJtEYwtZ4HXd7/qD8EGH2R3jW2NIwzrj3pculUjh8O/cFinPkkyUHQgeEvyKua
+uplIWs+4xMNYmYX8QKQ0As+g8v150/AK3YW2cUjbC8r0kHzQngnVvNE7ziDe+6Z9B62jR6SlJc5vBst+mmbhKiO87kItA7LjPiZrn8aK1EX7vxfovfOm3cVJrnDQTrjFJolps1CdZfH4YxhmiKK/jLCce66XqHLjF/4uNFoS9X6sGDRJWAVa
+o//t3tQCnrq6Tg54o2proL3gd/3CwvpJ3Mp+GAhYspkOANrJepZQo1Y0jzycGG3vQp4pii/Xr2FxYoKddnmLQtTDVeJ6r+CX2+2BYC8ahrDUtzZxgZQzgIav5LCY2Q+rOeMhfDaFEpkcdCDQEsM0I65ZmriWkuPtp5QFMVuu8Tnsi1JKv+UH
+8sRwCNzus++acPv3Xgu6Y8iLLgYRou8r+M/4X727j1v1WbbxpLTeIUm8q3Nl07hL8fofEskvSjK59e2YZalGBHZWjzVZQohcvmFVUMU8RkRDrm8m8XkRkp0OYDFpSuM3atVBMS8eBR0h4G2cR28LLE3sJ/WANKy/c8F3kUXoqURZjoH/U8+V
+5GURzJQ7mRj0Fe5NA/JuG7xjZ031+MAazKtBPWHWzuwZPgccDjjUwlbZ9W6j9ifj+Xok1QXPFHeNcZK1/OiymPglp3yqATDK4r/umzMOIy6lioaICUQkjxULydTmmK0GoQ7FNAlbN75MAblS6w8CbTu8nt1aQgL6IHAiXniTbzQM3W1XQYeW
+QyzECxu91N/vF9hWaPJayoIjiSUMgcspXLZ0M8L7NSkVKhhZZMaSKcnhENFczp8HWBkdX5TKfGOo75wKTHXl/xRSo9rOEJadKR/fuU9Kd4NG9aBG1nb2ZB/z+9cZndTt1AP4edTDAzL5/nTXvl5JaB8Mv3/WdX06NSTyEfQvCS89nS3dtdS1
+Xy+VMVE4Kd75R6g6Sk9ARHxXYcrCDabAN/tWa64BaR1wCAcFGBDH33nZQ4sAwROFcoelPxd2zlL0NMSpXNaKGXLTJtP8la3WjpKl87vChjSC/e3P6XT2Qr5WN9fqga+0K08N6SQs8CCyRtr2qJWLxheuDC2b7W/DI6gTIdm71f08i+GlCJMU
+67cZFuHiI1zFPrxy0xiJu7kgiYyfRf7zzrKhTG4uSFpXh1CLorTKoMlRMvyn6FZu6+Fy9Beko49fg5KUPZ3VDqH7adbprTGMWJUYxeJqeVXo02iMviIc6NoQBMdTIV402m1lXLIjx/qA6+qXGH2aqUxxbCvFNdgMpE0u1CwWmIWKa1+kC8zl
+hXHCeJF+9qxMm1/bo89RMf6tmnFpXJ8Dwlor0q3IiYrrrj9NCBEuhsBam+i1vPHD7CX+Za/00yUmcJifp3RjI0k+8ytdLfP3Dt9UTc8ufnCwgV2SyPT5ITOUndju8e+kYBe4tvK+MfK1z2r/57i3iCeEnFpdgWLSwvVFxi0s7qLkagsycJ+L
+eIQ51OHysET1Yyw3LlIsRFcH4q6yEvKCh1tDGkwL17iext7mUkwLVP3CoqX0FrVrahed1mcK/qAOtaBzxz1YZR+nimAG1M5t6trP9EAEPtWYYuTRF918DhwkFrjg5/uevckiJyghtv1TUlgofHCpoqT5zwpA6jrgFK9C7eAzixHswvsmMvqV
+CSN12fDAx6BCewHImHs5Ne2BtYrHID4M9tnPd7kej6jsbb9oyLQ4c2iqwcvI2a6FS+jOD/hscOn8ma3LPvV71Fn8bYusickg03+OgRSRmZFTEc6GhJK5jbyfOCAKweOpORJQIdrUm5BkBVr7sawjpgCRUF3Gn2+10tc7rkjZAljs7FfRpvTd
+2LZkolFr5Cc4QRKhobwxek0tUX26v0Blss4ovlw3CAh9kLVruTu9iGlj2eSGxPkL2c2Rn6Xlt1pYMdWasPgLTfnY/KePmZ0EViXNs0yyS3Oi4k+S0/DEpHsSrQGI9t/RtPiG32XBqqf/gShGof11Y9hqAmhTDp+KFBERC3zpNjW6Siu051ws
+g/dYbgwAvs9NicMW0u9Qv6e/tUKvdxkQrcw6g8EA8dsPFa2O7IZjDtS1q1Nk9Bh76GoSqYfKKLFm6m9iBSQ4l/qE5o3DMO35CHJvyUQVJkUepvzuWwMkevhvm7XGWWW+oc0GWf2rtSUwBbgZGjn60zigeecvRmlhsktvXNGLBEsj+3gRSe6c
+LGKbf2AR8LB7Y2ScwsBo1pp03OCwKBTyF7IbCVDqCiseDgqn6Lj3WhOvDu76Nn0mD8YDstvqKeGac9y+Z+tDrGKMwNzGcL3X9LyWXhm6srKFQSNCzTVwsFFnZX70l87z5isxsBMNSFRDPaSjRqg8YKZ6Zfxo21pcHEMKUZhiaFhNiYmxlPg2
+7FRXB25Wv6CqweJ9dgvmeXpmx/SSxWDPXBtwc7tpZI+6zEYuK/h0DX7owRKIU1bO/AKdLsKzBo6s09RYKk0kJaseDqOQYP749TvmzlCkjP3c5nG+TTfNhy+nk4+9jRurj9pSSXySqtjg/OMqzhdwCbuhwPbruw3t828qmP1Z4ngFXr0n+w9c
+tbgDt6wSnw5zzYjVXBYUh8bztawK6n8qKq6FqE7BLtdi0gOBgR6/B3ZNAbAEjwlAW1W7HE/G2dW4aStQY+IXvO0B3k86mCuGRckTqE2m6ff9Odp9g2jjX0C51kudoWrPvV4bp0AvYpzSHkKuJYDNnS8oJFsWwF3SzC9/1MxM9tc+f48XMfJa
+EJR564RhKfj4OEn8C8SL9hShMo8YFgmVmeihjU2CzORyKTMHKgfpBZpEP3sFK9m9KDrM1DuoOLv6xL7jlzNjuOmmtJfaRmLRb+U4sFoDVYq4ZXfHNWe9U8pVKqlUg3bvbeNFl73Fx0+pf6HLv3RI09HQhMr45xc3tHIjER3KJRh+/wYhTkSi
+qItou2WfRRAz98qB1y8ai3S6WlZW4hng83NnnuCMf1RkG/nCNlcb3DceMaHPftmKYeLK3udiWrWHCPHmS7GzZvzOvzvPC4CUCsRAYsAskrIv2uoHpexC0oxArrGdIzQ/KA2AGHBcrYvSofjwvjG77A5xyxaEOvc6TuFdCFdTL3xmrsAvo5+J
+KLuwcvbFFsutLzvNygnsk7hEsrvYRNE2RHGIbY7pv84q7zf3eFSpT7GmamdRdNUF5ZNnWZiThmIqxI5dKnRQn9tz+XKrzm7aGJFEOYKhRp9LpsZ3rAYjiQ77sJyrUWKAycpOLidDc7dXBjiMC+QUGtw7Q0GYr6Em6EfLgPoRelKViPQNFBQ3
+yB+BvtJKwdozU4eHU8UvhSL+oOUiWcyXxzQRAJqiJJbc8fpu4hT1ENjM3Er1EmlyCWeCgjqGbuI1vbIkP7+FSOsN9ZVVuHFTK94U8HVlmegz0GQrYW4QyOjCXLYc+e5eRcZZUy7YumaYddg9Mx4n9YOoG42mgqR2rrKiiJ9hcUhwg/JKgM/A
+7K93qYg0sJBD7clqPE1/4WN/uxiyMI/KpzXQ6RRZFsDdePeJgXx7uRKcmXWaWIULEaYFuJgm7iBB8P34V0gz/XUzwhLnu3OztGzftNsiddfd+jZdJ6RLp7EfSN/x0zfjNH4dspfyz9++ToFFcrNzfhRUeMBx0L09f/RAmTe1d5bM8yrAP32I
+Ul0vntpL7GE9M6+spVgoq0vnlX4ShT1bLZbT02SDm82zK3PLoW2SIDBfwJN/PLvTYBdiamXg+Fo/TbNiW8oox50n3RkWfrVt0FviqQfWIBuDtN+V0sdXIg4kSIhexbfWT7Hq9Ez5bsjDMxLTCanv17LQLojsze4eBPClovPwDspUHoDqiIxT
+1YUt+jeOXsVhPuIMiHW5pgyoJQWtY6uwe0irB5zzt7GBPq5bUECdK+xRUamjRdud00V4r7m75QafzQSZSNfeLx63yJ0GDvLj11Udzh2sVN1I42ZdduZKyw03qI894iT4FbsQnsQfY42xT2Vtv/TruV8NrB5KFkVID7hb/HIjyE6suOKPEvAR
+D0s4Q6uve4cN+/UiHFxdWzou25NaOx9LmGnXf+AMTOHLVphZ49iyfKO9LWRUesHkeeMIZU9JH1pxnfulywCvRCpZbbTULww741IoUnDHDX/xGhPcEadzeMmnbdbI33VAtW9ftwLn77bgOye+u/ZAFo77PGGUoQWeJ1ggLMw+++83a1UwmRWZ
+eaglcYwV+bEs/Hpt1of34yxOlhmpaGj1n1i0C2EY+kqP+iBz1RwZthqkcLqXbt/MVPy4rSN3qVAblU/VDJ5VmLWrjdgbT3xGS28qprJS5ScW1yXS3AFqYFm8o+LXyBo5waNVg5qRD2ky7SHbFxRcpOLYcVbLZvF/4qYe5ApPtDl/Na2V6AYa
+eW0jrhzCrjg77nkvjnYKajQ/lx9dpBhcfobquO+3y8vf5cnXPbYgoFEhFLeJAnYZcFarTp0lLNA0F+iLBQ12pBK9qMCv7mW70Mbbf7JqrBZB2cr7f4ukt/C4LEAkRouuN+QXpUgx9O/72/TzWzzx9JPZo++1doUqF+YlrmVUfTecsVsxk1B9
+v8fBoG5I+GX78+k6If5JHd1JBgwb5IA0IqoV4cMxPiY4NaPfpTqo2UlpGYu1w4xM3EMXh4iBAbqC3wbsD3AynBqbIs8rl/cQl87tcBNpJNkiBZlZm66/7sCQp4DjXUeu0rByTKNbu53CPWZBZzLUdCUWy1/+jW7So3TMr0NIrDeC3Bxij8fs
+pJIhnU5F6RhUs4nADsekz6tQmslLc/QGAxja+kj3rasmH0Zy/DxXqaI01dF5KXIFO9bo9QrtTX8NY4BYfbtyKpwOOCZmuvZxF5g4oPdMfsSY7OJ2hmHrUN9klNJRunYjmfOQ8y0LNTxB7Dyj4n00CEcKyOOr/tXA+k4DgQ0Ebqz0tBj0gFJT
+2xu49Ij/TU9SQj4efVgtROBZdgZhupyhyOMZegcJsYAVLG/Z5NvZpd+yoj0/+ShwZTWSgzbnUhdL2aMQmKgC7i7fCCM8dwcraQZa5ZsWsPVKNt6abYqzA1DOAVovrnXYNNrcHKMfrBgzxB76PWVjsd93HWg0Z2Jj6XbF4NvWh4IzVTMujho6
+hxk1XZQBcULef0ZLT0TrAJhV5QglAkkllZ2/K5AHKf5kVddFhkOwD7jLNshpnMRyn45luw+TwqebISWztT4EdixfKCpfKhCvdTnUUPa29CcvJ+wrUrRAAx6v7YkZdHl4UlwR/U9rSfBNpXHHjN3+rn6ryyGFNqUujwJxFj2CRCm63JdLKWp7
+OIvi8XjssUJVLucf1/R1/gcOUAZU7lILRBsi6Zj6nUm8eVwYtyKlqsGdOQ94OjVDp03B8J0Hx++A7SEM1zU133ZRrwnVYNYB7WRFTur0g566YhGzVHVkHfNfAcN66jfucKRKhCTZXahIOiTtmcRR41RAfwUC79dbXDlked5UQZdgm0x4Ka+v
+9WlbnKKBcgRRGeyZklxhTJ7Pp+NWlAk4bKEkV3DK2L2GAyzbKBA3H+otl9Gh4JkxYBho7TU1x6mT3Ah5t7OU0oijFrlYwxBRKwJtMYxVET6yvbNtF1Q/rzU0H+gQ73tZkGrTlC2V43YlIaMOWmwuPpr6TsRjXsAx71AIHj+OBYrXuqiAqlB3
+cU94dprPLzYBgB2306UN8PlxfBwqnqxzmZPXE2skToMWyAOYldWy2857W/CxuLR7QNNv9ykqwwh5KrsEz7IT9/wMh8Evrmaw6cJUFu63Fj0At+kjb4+HvSErxfeoR10B1Ir+8b0n17BonjV3/KoeUFCkG8oJTy/onxkUgrultUdD2AHlsoEt
+tcpMdGtdz/1EENw5is/IKuiCkqTFOtENG/TVCrrtk7KXUYmfDDiAhPRLtwmQrvID+kjjASCFddG4kkNBBfAq9hPGHfmxeC/cXPNzP1V1IunY9PzV7rWRs32RJB0Jjk0Zct/Vj9pfsBdIH7AzRl4U7GrsKPV55QdXFhHFH+pJnYlDCEBQ6KfN
+aYOZhe/uGrterOIMPKz+/uili9HlhFk9inHW6h1iY+ej5PbOdsMoJuy2/i/YoNomiVntMSrhVHpxbmOn3qTfeB9D3hJrxhaYtjfMIGB+BXf8TXtRCDIhN10NOXBIa68OPEdEZ+2ErFXsb/RO7uyVrw6obycFPELlTlB8lOqkR10IDIJiEmAr
+0GP5TUIuVtW/m7a5c3E+FeGdWxwZXRpq74zDDtMqrOzopudYhb02KVx8m+fV+lCt/LYZ+KCP4ibF0NlZmEC6WSQQKtpgyeSw28MIZrsleclxWVF+bqmsksN6QWZtFHsHHYXsuUjUIIV0wEiN7r1sYdSF+YgRUgzpv7ftv/wqtOOLVhxbKWHf
+xDRZY4aa5v31u/jy/dgL2dzr/f+/pJ1muVgPWUFs4M8svLDt5shaSdSZpEtsOo02YjzYWJPGxlbr3BYgENjXGTEThWaTHTpAfLg6YI7u/jh0xiojwmbU0YnhdBxQrwUOOEl3iswA6pkYA4kmAtGR/4m3NQK6pGIq9tK5MEL/FYG9lH+HLHcz
+u5mqcrUZtpw3a249IYKa/jUsyO9ljlKsspi12CejkCHFtORDbTYskGD/fPgI3ggigIfIhRoSICMu8Y7IeSHcT2HfH+bts43s8M1+rWGHs1i2/BTyI5vMIIV4LNVuyxa7s8dpcZXdqkhCIJQ3SorPg0c98fWgjiREh1TKUeQkg4Yd9kFAmuuo
+OELpWBVj0QH6FVno2uc7tEtCZbcHcbitsW+yx3bXpNEB0O9ouRE9kgnufbVkKIShVzKbXtGtA6KCz6o5iW/omzDCAA2w13+1z9slmLhoJBR4z/wMBUp15XfB/pUvpcDXXIU0eEk/LpvM8lDoQFXU8FGHJBrGlp7msOdfIsS0hXnorB3ILJhg
+nC9MjUpiHL33vnWE7r8RFoRgXzuY1sriOOIrioD1HbcQjMGktiy9T7G4bHMd5eL74nD6z9+BrPlJVzbKx/UdFPQWqVBKDe8qKSfMP6up8AsjbhdzjMcp+wTVI4CMoQ7tuiJ4LfqHY0j2MHTvLNL0WNB5y6/mfaVx2WnsoXJK/qrP2l4Q0BYb
+N66K0s9dr80EkvQy4qkYGg6QLbVaIhvoLp5N9YiY5BwnaugECorovc9ArdTycMhj6iGYYDVLkiTRcjZl2vHk/99U8Mz4Bzq1Rkg1BKNAXN4SYNghq6J0OAh7UVK70vQhFBYeNoWyxglP9lKRUbJV4a+r8MOx0kkMPrR7hF/youiFrthcGYtU
+oAhsLCgAhBuhagiGAW6oXMa2y1UIO3J7Fyw+7jX5ETRYeFhxZpxILohpRWALhv89t3IdwfuRShOoDkJDEjlejEPsTXbhgFGP6h+UJMXeF7eQEElSBO4XHje/eKxhFFwRTVjnA+PTAG8f+Tt0ECNad0vk4tRS8prPlqsjMXNJ36bXmG/juN7s
+8VwCv2pJ7TtDSC/2LPPepWd5bly0LVsRRBBmtU/uHRB4scZGzZkRNzWoiS83toARiCZKRamNiaF41p2oDKTdbC9AYrwEdSbRT5uQwGQ5yNAle6dkHJyXLTnBn2SN8K8R9b9Y1hkJ60+aOmu3SUlEaDRapYPauEaW8IrU0pQhMVUAn6xniW73
+XMWoY+xtmmVmGHX8p/qY+v6KlMXHzvkelZHvTsQt5Dgxm5fo9DBDZMzxYE9JvPl+9gWhpi9MFjzygyYfApOPguiZxJcJGMIE5Qye3Q8e26R7lhdgZtxd38070NSAAsltiyUrMG8nDjIJrRfPqgDIDIgLEKmXEtu6kjKeKqz4AjYiSv4S4lgR
+CMnzkwZZPcI0l0k+H3HroxEqGj2JXViB+xeM/zPwNya05+QInn4bQpj2fiGF6TFBiDEcMBe97XRb7RWsGhcAJT7iYUCN4MJrGxYb/jsPefsO4sIBGbxUJlpHmVvkEplpdXGor9Xvp2bdqOos10WcgKdPw1Lc4GvHTPTpQeKEbG+qAp2TGiBC
+kMEspCTnYjIcBibABD3su8Ffr/qoqiYVqgLE27xVHvkQhWAVXYxeln+PHM8nSEJdyB3ELIlVFtkN7iEsS3yd1NSqI97M3iGqK7r3JsUlBzTWpVmdfgYOVmFiR2LjiXLXeLbRmKSd/b/peB+tpfh0sUwOQ33Ojg3caIWsHM71iawg0ipvcNWx
+CsudOF7Ugw0gL/17TqMVMP+tWeMtiorxScH325Oeq8II0z+bxPF1XtF9+MV7F0TrqckNozN44hFi1UpR/gjiIccD8QmPjAV2M/cawBp+ZrP+k5laG7ChHY5ZuC69wndZkDWZ6R4ArUpnv/R7+qNm+FmrTDiycziPXo/Zubqions64DZYwfMe
+KOse/xwPsmlipeA0vn5x2F9proXmjc0jycZ8q51AfI1SV/m/DA5tbJxhLXWxidrbz5z7Bwu6QXYsXyZlvbdJEApWXYOo98aobFEUfZ1vWHMzWdbYUgbZQt4IjZ6Wt3E1eCEI/khMDIEA+ScATFR19D8Xuudtuy2YMb2w2a97nxXlhkHaIX9V
+PweREkATxEe313tHg3/yp0gU8L4HbCkvoF2bo2OREPENx5cK0ZkcNYO+rbrZQNQaXM4Vp/Cj173An5eY4ZnfRHn3m3ZLkXmismHUv/VBl3gzlJ8cp9vwZr9A6w1FvnFaETgi1swQsp+OFMBZIm6PEFy0Vxf0ygOWCeoYCNVPnp9VCrVu/1Uv
+hKhNAUxAgCy+HsYEcOjISwtt53jTvOBScBIKt3UZI9L3rHaa0WpP03CGfp+/fyv4OS3Z13+rXHaFXEOiS0eSragIrJI69W3Mo2nvdg6Fv9djPlTVvUvk435yOvHNF0rGbzQHnoImGPydkIOXggoh5mWxwDjKD7vSeQLVMfJOF5Eusbu2+HK0
+iBGHu6XnOEEmf0GfDo7J4DVl3Jz2yZWU0hKYVm8CsoaoExYAeVagKv7uwihyvc/DwA1Xzn6CnkmktClG8OdiOh6jxgQM7D8oMUZapsgte+J0oVtxcOd76DzgCUuDpvD7RIOD9HF24nARMW5S1oW0FZ2vJUNb4UKQwIJfnm8tX6z9A0IJjzgF
+Z+0bWK2obSupVOymwbTTBMetDDqIo2BGbIOZXBbCW5uAujuT0tWJbmUHlvamz8aqziTeXeOnU2Nxk7WZfELc3c3caWYKbHwS/jd83WBr2t8GwqZiDFdXIc0Kh+b7ShM7B05zZdVERq62gkaTTHatEF2r4crdbpUO2+zweSAOMvcpqflYwAhh
+zt6Fk0KrlbY7ix9sRvkps96F1MlCYc5qeB05WDeHZNRxR9Ah2bbyYxRFYpicuwVgRn6asm2uteVtu+jzh42pYXXRexLkp2J7L6d3bJorN0enrv6X7dhKlYTaAUHG21r37bJxPnvesxecKY+6yDhvTP7HbV12SvdHeptJxON7odkJgIr5VpOE
+HwMu/+kRXJ3n436AIkXA/mDN01C8LGHEgzj5aQ92rOmLqmtic+e68cO0+IXu6a8g8hkp6VXMBXX8ES8+2WIt2yoUQA5Blv0noHLUumrUCZSPb1geq+ZoR7S6IV0Gk76ypLUDD1M4+L8LQxa760V4DrWCfRI22MfxZlC03Cn1tM6hiWBFYpgd
+sSB9EixJlEYaKllP/Y0i8YsrEP95oi9vYDtYo2WPCU8mxeJh5INx7whWhg1YpdfGqWasllJLmRpiApfl3G4JOND+VMeo7GDFQUhWc0AtmdSK/sng4TpAjU7EjzqrhP/pEWa5AuTvOliTe3+pizNtu2wNfiloXK3j1okJzNKJA6o77DIe0/Xl
+NeJ7wdZHAQNokRcO7cB1COH5CBUJ9kQqNGB0ogGEgyOGKt9cEImXlrzDiQlnXCMQLN/BCnwzX84rkeI24L9RRtgqT8ahy69bq2HGcv1yWSw5Qfh85PnnMjbOhrieWKjvrWlJgrvH9a28VK4SVNevWRhk4UTSdyIh91huM7GOtbYpY9bjJ75H
+UjJZ7fn+z6RCRFPPLH9pf4kJs6c0gNh4nu03Jdg83ISZBMC9o2vPmPW7h38XOk/38u+WeJJMERHWKRYnFu2Gpl/vAINvgM3Z/oCPQvn1jyRlpHXNa0xZAgy4DARMVe2RCeDp25mPQgDuwG0B6oCgOzE0hqKvx9ajQ55SpNJo/EBx5k8xvV3t
+Uue61C3dUcb914sF/ONo/12zB6fH6nVXwyL1zcWtU91Xs4JBVn+0ewgyha52tlhTTECiydRtMUB97TY4onUoTtvp/BQ4wXdqiDeN8mOCDTyKm55TW7PX2vWq/IvTx/FnIQLLoIaPD1LvzLVjtbRz6+KigMExpS/dqIiLelfb5i63AVkxbNbl
+abDNjUNzARq/SR9JIx5WOA+MezsRPpUsDgU84uIDCE5eUFjVmkW8Zmmxgi6d0f6Z6OiSQxS6u56qMiM70az9BpB19R5285FWFcyK9Izz9ZLQV63M6rttq+eNVgypMt4OOhQCi1TxQgtN2nmcL4kcTNv3s4637yUjDoaRRx/Jg84mdZpNc8nk
+/jDDSApzLNDfdiD/J8UoocpK+TUK250QxpjaTOip6Pf5IwoYIoSSX4JH8czPJbyCK/lm74OZMAwz4UJbbE9IAqLwXJiCuuorBDOAXl8YD2CzWT9/RXqQOWiKFn1sty2Owf361wyyqLYGc2CjJu8tTOUWigUW7t0b/EO//ZCCRv/KKE9sUbU7
+Ql4RUEuzlkX5Klw/tTVN0bBWH2pxVq2W7l0iOhAH9zWrSgcj5X30H+iYtvVfU6R1rVSa5RJpStRTpeJ2PbJs6c6muTpWn5WUgZ0YqMXc9e8kszXtNu4P3faRoB3VPE7b2dBskT9L/J4Wgl69G0r4RA50xBo92E2CDSryErgm7Wgm8ca+RkB+
+rBpZhykoLzVX/SmBq9gGX2pT1QOQzA1y9vBdMb8mEerKykaC1evqe8qztJLbfojK9y5sYwGDFjzSJCYxf5Mc2cq2hwjzVS0hVwa6UwPDAywh3E76aoGBihgY4ZyYRsbkaew8NNK8jltnmwyAHXPshp6oTLPnu4mhvLFgBVWJQJV4oQDnKdLQ
+nwCACyMtpRtT4jpraa/Su5lBB3IB4r1R8xTpfx6XI6R64LPehYPOWurSUft/+MnMqZNj4q3HWyBF9a4i3wpMG3psg+7VDFm7GaJjgL0R5iWbeEjVwjnu5K8gIOhfpgJwQXQYBjWS677bCA9YRdzMORUmZgxFvOTXJmoEIXGptIdoc98hDd0U
+KUqpfWe7mTt7m2s8QqGsyS7K+Sv08HVVrT1VOvav0nc43SEA8L7cL+HDnfHbHbeZy2vOH92n9xhNqxp13sh045cw8kQCHGly0TxA+ZFy2pey50rSi1WD8uR+c118gpjuX3b99dLlQ/T9osn+367PVVkdDnIDLDZjyCAjf8PlD+TZIIy8jbx5
+2Kr2WufXreg0vFh9kXFz2A69ntxWz5l2D8SABXOFpFmex5gA3VPyfJK0paOSPndgUML29Y1AIqYb8rlGHsHJ7IIbJlfx27clI/V3vNGINZBxMewIwxhKKCtCo6KHi/l7gtmjz8vpkNLCNFFAZFjZuTCkAHGQufVeZKHUw7p5QdNG433rKRe0
+Q9CkAST/TysfnNjhpHhysGQRrgS5G3zl6HOjIJeecUTzPawyoCrRYXJGdv7XzgMRcrdTDfil+2nbMzJ2OUOKzPyIjiX9MZSrbwwNTJV6UQD4qveAgoVscDrZfpJHQf7krgfU/azF79CBmUN0+gXnLaG+U5bfGs/9C1ooIFUyBUp2GaVxrJTl
+mRG2/vnRwneY5LO7Jo7dIqOWAQFEj8kiHFh75Lp4CqaylExNpyVx0yme4A5/IvLsyoHBJxMNaxuMfihxdlD8F/tVU31Oq0aaS8+iDwIVo//nfAzsFjr9F9vuu2/v0oFILfDJZO/xoHnHk8wtGvbWBJodvk+06/h+ATKNCBd0Bb2ucqrHE324
+myfx7PdF5khCA1HPWmBdyECmO3mfbi5dp2qjlA5XXJ62vcd9qpHW5o6k2QL//HTNYvcR6RHejYSh2rfSRR9FFq0PJGVEwL56GaHv6egkPkiTRAqGHLWxoovQ8HCmlu5l4SNBefKkCG7M+55lcvO5Kv15XSjgp2xwuuTvS3dkSYma76RrOw3x
+cURIzOoYv1KvLQHdRIYqF2nlYsUe5ZeRUrLYC977T6sK35MVRt/2IJB/DRlGESWgGYnTrcGPcwxGrNQgLjDx6zYMsW43wDVKPs3xs2MgkBRCELWNoD8a63Udl1LtpPZ0lJOa8x+uL4FPpD/pfDjMjv56C3SqG7h2tEqfI++DHnSk8p1pXlBn
+5neocXkJGgrsVSElgcPWFFXrD3p1Ug75olVYhDPbk8XPd+G0ZZS6l3LwF6q1JU1/nGZOV6etZkZlGgQMuCIXIDNLJAqVM5HDXtxd+P2ecrFAlWTs+eD6dyJv+cTICv+DetqhXDSvCsJelj96cVb1qU8kden2mONiTsYYMTf/Qw4QiL/MTFMY
+EhEk+QGIRsJSj0iigwxCSaqCCu7wNqSGsIIAIn2/rrTwj2ZXURnW4qfPpXfOWwLsmVWIUun1MCxvSJKinuHCbL3Qpc5zL+tyt1E4a3AelUNURSdbpf3T2BGriU4KCCbNSvtDYPEhqa96fk2AhAyBiLBjPgl+b6nRfwCAg8kasIbQoSWB2FYp
+XNjNc9d6fLQ0vL3tv+wNDG7z8Aba0Lfx1JdYomqTXBK9pLh7etDxQY7CkSvn6Z6qsXzWazFUVl9ccvie7HUvn3CNwfiwSb/AFg3cRrcQ6HwDzcJvhjp289RawlsJEf4pUcoOMvJ3a4S/5oSf7WrUBBIEjRslYczD6J5mm6OeqcXgRdBbbKZM
+kuB2jobWKEV9gS13GTer7t8o5UDZgnWN9i1tJhp+zH1fXnzF2mPNYieT77Ki+AIX9nLI2VSHAFnv3Uw6wHqfTNMH7uWWHzUL2TNDS4nyhJ4no56216cBw0dYPIWRtEkyNO2RtF8ThleWOr+NiigZDHlM1lEg+k6QWdbWbvblXzdxlMZHdMbW
+g+8Tb/VgvydjIK91Xw+Chm3Q4c2u754w5o/XpAedLFpZp4I2uSEdB1jQ2lvWy/jHs1ZvxSQ9NZ/wf1EWJFUxYi6zJPxu5FbMwMtYyDQ4TS20z8dp8zFdmHrVComg2IV7JuQQYECsOwBrSSSTHe10Uu6VYC+UniLwdZxDeSDjGTCIm43kIla2
+BiBbD/oyhIHG2H7H5KR/gpzKnEyWJBK1R5TVjS2NcjuLNPTDTHwP+eztk+UWkiRivDmZNBYk5TnOv9bImEksigyxiQoPK/eXcwr+v7WoC5iK9k4C4BDnYDjagR9p5m+gP7srwMoCeKzYS3zvxnY95h9ZJPxkmoubx1351CrGyWTr6lKXNdPc
+y3i1GCcEDDQFGhR32WSyOWw59TtQei5BQS6fKfvbDk61H01gcgnFf1RHlGmc+xMRTET6h40cTGzOIXWmOsJyADDT5GLQLMuCU0i9CxBERyUL60bIEKWiXsrtn7x3TCi25XO+81coDUE5grImDeWPs6v1+T4eEkVZmgcF7DoH+sXjeFi/nDKd
+zLZLnFOYmluaU3P5Lh09J/lnNZqeN9hzLwLtHqwCeq8g7UBe92/1LjA2d1S1qt8B1jkGL485Pxz6G4oSxZs6zj/icC8pC9cBnrmBEWAHJEUkF6MiJL99jXnAw9fjsEsO1nzdE7jKC2VBkNcVfkcKCbVEvHCjz9J5IctF4eLHL7fz0mIEoL5W
+n3npLN88yYtpC1th0op0tdkP9M/Ws/Hie7REaI30JJZSneTDESW4wJanmNII9Rnr3WZoNtlVCYez6qdNhZIW63WZmeiI73i3LCyrHywoGgB3vzpscclvy1XV+GPr6siUCUPTGvN82N0a+scwmLjzs7AVt4+1o3nBKvgcFksFdgKau/r8Xqoh
+7ySwHxqLdWvJ/FeIhzPVtJH3mFMdJzuuNojmk12hD5sNtJlPsBFm6rnlpqdtp+6Ssnsa7Jzn9GCwYfiRsaVfyKwEsoZSGsgjmRhxq830B68va8oYYxEj5pCCYE8BpELoqzs0M8kXV8ndLV5W0R5IjhYxshhpK695L7j/bEGuHaQ9AkM8dGXA
+x+iyiIf4vsN88wadp7nU35sZobGmTeSHjClzvzlXwW/rzIdKMqyfj6yaa8JOiRHKGMDRCGylo8+dEFu9ksNPR3ZsuViYYwCmf9wXqTfsm6ClJ/dWq3koE1EoGvTtvmgbK1wNHfnzA0xUUWgD6EjvmmVHT6c2uozasl1cGEUrG2Cr3LnRD3Nb
+pqPSaY4bFYFwUX1wj2lkWq7mSRvl5fPcX0MTYCasxyur9JzXqH90qKW9n+k4LRXPHB5VZAIatGFwp4VqYwAzgufgZ7x2ojqOYCFq0Kc85j8muAUS5l+Nn4ICyHIyF8esjnvygWG8CXiXhAqBI/6dvO1Serzn426wPqZVbNz5b03J/x+uPb6u
+mhU6qV+wvvVFaEGMl8UPRGngrCnBn+FuEAjHR2ic7ugdup0kT5I4Tx5zjeHtkYHHkh9hZGOQJmOAGmJXscEeMkk93yIR2UPNkXBkbOPjK/rJguQrwVkg+tsGUiohJqa1XUuaho3bnxKEkZ6Ashbb3IdSc8QI3BkVzpilDYsZWa/dlbn/FpmW
+SXOzDIm44FL0Deo2yNHxW6AOPcIBasCznbzLVi6pGICTI3VZN/pXuvPVxXhoLuSyL8M6SeHcNPj9mI9UA8TxcGuNfxQe6x3jDYRlcWqpifcWXxx8q5kc5OD187i9DKvdvvHdWjzBdzd1RdlxFmrguyjHxgjQ3Ga1l/clkv48PsBBV9YqOipg
+yET6FY+5RC0bmsQ6ug0FMmW77UsKniu8xnUJ3RP/gmykU/1nmsQAJfKV6/5XzemGaTiHcdqFR50Hp9TKQVA9PVejDg4dwr4WYLWADuEX7w9LcRrsYOmOBkbibDFZW9D31kX25Hhd1Bfzkx19gYSPBFit3ifJIKcRdiZ7f3mODZYr1lUY4MTf
+o+qXvStz4exVXFj74xXM3bBwYL4ITgtDkZPiTHmWcO2QpA3DmJ3SakQ33Go671bxjBXJpY6MN/EP45MLO45LIL0B7Aswqx1ClvbyX13wuJWzMirnauaH/sCi7/UzsgYx2BSnlGX9lupsNCdtk+mgxpC6I+ZG8nhBTBb4QoUdYLl/EPExM/WI
+h11tAECvFIt4bfiubpBQcxJiZV+a9e5G1VKIL8Qf0+TWdOnYEx07/iAiuheZm/Uw7ToE0x09413wpnQKhIcqbEVk9Oxn7CmxjCBLSHt2dlvnp5TeQdPavkoUWDBAs6e3g6/eAE4gm45aBxurrckAVBYyBJafnL3JEaCbxx5gl+tTU/px4eNn
+Hw1ODeO6FjzDFREHnRFXv31EHs8N7qjo7AYBxLTMte+ETz5kvCSC8qXXmY6PQadoDl/9Xqdl8+OGuoYMVrOO0zdF91jx90DR04R+BK9v3dFLKkqSZi+44hnAtG9XSQdaalbxquh/8kF/fnRgl2cFTlRl5FaEW6hvk1TLwl+sDe04L+uh432z
+FwO16ZJdzmE7qnTPceQ0mbruF+W/olPq56jo4uKUQKMirgLlFF7BWWzGBCuWh/NygYvfCK8ynaB/rcU14/JtG78nQrMDkrLcF/WFALJb9NQtJyo+Ua5WscDGwOdEp4JKQ81j1apYx9Da0xEqnXl3nUxvE15Nxe4LoLNBh1pRXP8prpN0Ue0c
+auOp4XJcalY/Q0CgPrN5ocSrc4jl44SOeW8JuJ0xK+Ma3K1eHixC3HCL3sGT8xjet1V8VQwVc7UnQptLVnjz2ieEyHWcuq3J7/BgrTvKCDBk9+VcGKzPVNKqyYz9acwM3EKQn0QcOkj/1q6NdYecDuFNcQ4gyUKIszPeu1mrabDEcbe669bk
+uRWDN6jqF73NF8qLOSFQrBSqQAvkE+omJV9lk5sh5xHYBjHqn+M9I9xZVyTS66oFbwkj5IGjJsmCxfWL2AVvGwZqW7hJd50iLIoubYm5o0NVyXUFVKeBcwNscRIqmhE7BBteYubz14XHwRfaw5vP3zT6mCKIPO8hKXUCByOBLtvGJqU6HKDW
+Jw6KfccOE9jP8IN37qcISk+PnQYSQ00OXn9i1fxS8DQf3u/Ph2F9/dcRgMjbgXcKifJXUP1Dm8J+F+c0/QB1Hw1W4agBbWCKCVfKiC3Pa/nWCGP14ExE1rsqFKOoKzKUoLouWnZsTVeJwKn4NibiC478r3KPIc5sVbD7xyta/OeZFrJumlN7
+XzGQY0M47qjqjqen+LhGy50jJwfIkK9x7xxEDu+r6csWdzJl70TNrXhqw4RGUx8xRZ3YXUfdmcWxEIPGsH6Li9SIcptJXFykKx046QSbYnk3iH8n/quLEex/WpsEYQYfo7zYhj5GWno7WaVuEhN0t5oDLH+wJris2X7jibXHhJAERzWCqILM
+QnykIUwYP8KXslk3dJy4b1MwWLVOKQzQ7Jk7DbDRROAVcWhZr9hFFYi1opnGR9pVEm+LvtS6yWeJs+uM3b2HnmyawagIrRuPo3aeTjDCKaFKXUcHACqVzBflvEZiIuMhmaZcjBYuqaqOY0+F3y6MN0yd3fnt2UKdYhHFCK0pbnpodCrnj1E+
++2dL2YQR7zFyL+zUvJrVxYZjjk5tzX/NLvqKjqNt+KOd7mwoU1w4sxvf4lBNH3aRZhWlsnEgR8p0tR9gnq4NsM1XelA9+dBw4xRSRz5CDHwD39gUwJE7apdSNEAuoxBYiZhEgKyxhZDfll9cvbrBu82uy1dG7RNfOkFf26Z9jyeVEZFZbMfy
+bqk3jDiBclb9iPMuKTWqF1ajWs05H2178byVG4bsWK9hJP70g3Yw3CLTosCKM4Jyjw2iDBQtrOPlfgmlLPqoufxAAHWtTuwtBfBlUyKE+glqxaPQjUT+w6/XZkCl3VzYOLAjYvz/OXTZT3bbTVkifIAaiozBNw3Dxe9BxU128izZAKf0H8El
+n6ntKyXKYROZ5cmv79fs9X9nhGAJf9espBNpFYGOLcebGvEnfr2HYzLY1l9lgKXAbIRldQQt2Df0LGjwskpjCGh8dN7mrx0Kss7JB9Dfuij4titf7K6wbCKmyZVX3322ruMfhVfE6S/l3N6yyR0x6v1csNpiookJD7EpvmftqGJSUtbkizNy
+MYuhj3a3+lCPdz95R74xDnDeI6gIEZMfBsfTlS1hIldxbKGVyPdcWEQKnGmbGd+8LEvtsrEe96AR8700qqx7Sgbsn4xorMoETwfQJSPBZDHjl8zO2GnMoJS/h6fEU2gnzxdl8W7ntY9twldjUrSYk0GRDlmaMhplmjJCdT2WZiqEkKLSs+YL
+X9TV1c48Z+qr+tQpEtPNxZlB5JFIknUbmvWwoWzc/mvvjF4TrmKFY3bXEkk/3W6Rhss2bI2oZAHTDIzm2avNvbh12KDVOO46KsFCuT25HeYISpQ+27h798ob/nJwWxIfPkXGMFoEHx54VJu3rU+GTMF0XSRVIu8O13Vw77Mpn08hIo88/n+G
+NOwMpazwFS+y2dqqPEp8eQFOQWTbhXN6yS995FhCvwVWCFpBv7Sgrc8IT8tCdXpYd4xTC5N1hMGHGCnW5ou6hbY+bhFCj9q0q8GMBktGiqxH49KpUc8+ASnAa+PBDBNEoqQLe6LGJs964EWsKk7KmOX9hH2VzeY8HvX0G6eFag+uSGUPk/xE
+mEwSQxQ6tFEFPVtriG8xo2RvImsgTiqgLLWePDf0XRyeh7O+HhYNWnRHrkz5hqdczMzluG+uW36URLpwjzRNjCEkgEHwP0VM4A5N3Wj7a8DH/h4irtVunEwCp49CTQicaX378oljtxXkpwEW3fD1jg6cK3xO4fKXxJEp1HStElXN4UIqYPUH
+cSOgepEoq8zPL1iw9dEnkOn7O2Z/8yOvyTRXQpX7cUVvv8Y1dIHVHJsbVw2H+ljulAsNs4UhajS/ioukgdO9bw27rgoWjxmq4LPY/rRDA+SGkR6ty1jyn1pPn6JPb6niFImk+v+y6Di8iR+NXkStfZidocx0VbPBzdWa2N7xVpVSolEqodrz
+EF9U+ZKRK6Ycdej741+Ee2NJ6Y6e42beHRWuVgylPPr+rsWSGWAQ0UOs0C5Dm5GKU3oqdcIgyFfqKLEw1zqRh8ks6sCdDIscruWqHfWNMLmQ8W+QQSlzaeBmQ60oOe4PNrOZFDUMfsRxH4gnGd23M84jE+7+eiP8G8xVoh+6lfeWKRNIZPVN
+g+eQDzoEBTItIbW6nyFql/35A/u1bO+00sk/0FxI5mBOz/pijxO5VTvNmwgg9vqfvNAhFGUiiYirP3I7CUpoI/8foBokYLWOtsyO4GiLFYTQLo3zC0HJoay6Pn11ybPFL8puUVJlpoaseWMVFArc9qJVyPkB3WVjoqWAPr8wx8YdxmllrT2O
+u6h2jBXnCtXlhWPsViYLqs5U0AugtJY6l/wHJor69O2p4ehBA2XBhP+vxXYEaucLjykjaJuF+BtaTuIiEqxSpDt6wWg3mMzoragKX2jpVmvkKVwHQZkrEju1/2IZ4c8sJu3T4oe/9vDNusk5+qkkHJw3n8vLyHL5n74daexElNOtEC7A4AeL
+krsZZOdTyAk/+zCPNdl9BNKki+i0wW1nQUK7uoDI3v1xuz8wYRaKAFhBc+pvnudL6HPQ5W4/e7xZ90DzYEtRALXebNf1a8rOWPU5uhflQPjhGdUDUKXY3HJWCTDw6jCk+0xzMY3NLDj4AnWmg1phHYIHMlqizz0pYFyaBj5FgVFA0gh5gp7k
+FldDOX2zQgV4iTWaF/V1qDwweCl4GQ81FCs8QnFBs2VOCFYdhSSmZFrCoRG642mLPSASI3M132qjpYXetzptIVSp2pjJ+h0OG9zgDvgI8QZ40XPLJlF1z1C510tRG6bbVCSavu9MW6VSASCNmUe0n2RuJRxzWdEs44CLN5lDaDl/HkYijm9F
+3GZYe1qPa8KN7ZMvJxJ/I5YTFj1oSmiq2vzE4LRTqdojHUeRzgw3fcHpCFQsdNTF6Qoa0XuPuOE3aqzm4/wEp2iV/ertRxoc//NZ23Yn0OdQyVCJJH/9/5wGhRZYhvLXxeO5fwIh1ikdkVtJ8axnO8i/3K+4gHtzpGlJlJIZYntPU8Chy3L2
+7rqdJtBhzH8Fr/9USOc1L47iAlNHz2yTudJhA5Av/eiPxq0iBSvDZsN1kqL62bpYX7Lco48HkDgXAkVOCWP+7BaEXGHwZxqEBzQDfTC3kfrnnGdW8zQj4Ey7C1lPkL6y5a/be9YK+1ecdfUglldczh89VMYarr2UmRovQGL3zkSWzB1KqAI/
+veHVSLrz7K44Vz8YjyKLYSMmKq0tKFjtlKAXJsxupSbMi1gsQ++g0lEXgYb2D/H0couqmPUgj1PyVBCpyU23StZ/EB42mhtw/SS+zXv/Nzjus/ddDgaF6yzCQxeLZoJuHmE6n+VohwxZGfO5erkKhKnlODRfCdnG5Jp9zuYR2wQd7Ms4fliC
+3xYqBoS9v41tPp3gfsnGbP3iYu+AOnRae1VtQalT1WqulZYv+DpGVYLsnW6pw7cBkhkkHVIEJKS3jXyWeEoaXEN5Tcyv4XyeJHgB1w6bXdyBY8pUB0wMYJvW0gdHMgMbXVWhpJLKsG4Xrx5xfJXFh8SDcxEdRGlClGET2Us7vh5ld/z87vva
+tq6Oia/PMyQwXSOTDEUdfqDRTLUYunbf1JKlrQ+cUgKOYmj4RfNBk5Ae8b0YvOeaR37f5YVowkhmcXzkAdutcZOhkj36xj9VB2NX8nvOofwhE0BvzVZpfu47b7weIo0DB58BP4kKAAKOrGyrQhTlDOM3AO3oNTxehL01nzq/6waNL8CA/DRj
+jrM4+fOlst+0FMDBGDeUw8XGcxuPWmdxvrqB7GljVT2PFc4amXB82SZ84b/88gs3OZfqDzTdUIjG1DcHLb6o3qTp/y92hjxwIXVgLB0Aq8SeisVAa/KuL8q0WwPSm2z4/BqzJPQLGLIznhvsRBRucCrfMNov864Fb0wSypezgfiiff7QtaMm
+Ff57AbqUVEMLZIlVT9DKPyWZa04Sv7MyyWdaBndGa6W2ZVMJFLXYTFLPPswB2yw9vCcBZH19DyTrjAYlJ3oU3T/xRQSxA15/iCIRpOKaUKiIbeqhJjWqhIVeVWbdJ6I3Tj/HJl48DDa83jyg2MdwfYXaq4GHCR9N1KlpmxY51C0LPnitKZtP
++lMjC4NDjEQugJuL8QoSzWzJrUmIb9Sf9cmXhQ93AAqXTpNxxDcztYcP1ynQOOsQA2o1q6ztiPL8eu1Ij7xYjB42GRKEhcrMSxtiP79MSVJW+K4IUP0vKS/EZR9tP15KsDPvcgIJ81jSvnqU5VGfnXSCbntlC58eLZlsLziDHfKuE3YBnVIx
+GwoWPYXxdkdm51q1Rf8t+0wOc1hRfBnMZTgoW1aytCp7nmKUU3G+oQ8kEaCtbfNYCvIk3TkRvHGZV5cKR3mQ6pvYeWH/9+8knb1+mC8waVXfXUji5PjPv1b4QUqtiEthLlebFwA3h5cNaVuP4VMen6LOoHniD7GvtGWSIkzj+/jGw/5ariZ+
+1K6n/DDqvDSJKCWzwi+PfRI/rZwUugXnAAD0RgEYpR2ZZFZND2jjWsoS1DO/Hft9grmAvSSwP8E4f9LI/JAa+QFbJ2VqYsjSQ2mwLmfM9APr2nGrhbsh/hm8EsVxEWuCpKaOX722u79DV9CJRfO53d1F+PtbfkyogIB6HPhJTlkNizZuNTBt
+Ho2YpRae+8fUJWy3HMdXDhaBojgcehEhTFe8gSpxSSgjgQa1Me6TcHgVbPvLXaz2Ff3uTxyfFX8EmGbFzBH595AsrtpwagVMYmxdwFqPkk5ToQ+Fk96BRHraAEqxmUVg06UYC/Be5jaH4F1FW8qdU2CBKKizclqbvDXO44689HycKCgT0Oxc
+laNAtGstEEpP2PRvOyzocC7t6HbWuRx8ZgHs4ZcKaNJ1AWANmC/e0c/BjxrMB8Qbi14Za5i3ae0iCIVRRncfLg13J33XfuAGd+PKizKQrTPf7TQBLX++A2/FU02BbqKJVKfm8Os8ejkOjFbb9F931LJxaSTXMYqeYt9oyqu5lQZXMWs2qzD1
+jnZ4qqVOho4IUB0UCNsauCJI9yw5Sqs/EcOnlARCxnDTz9P1jlVA951bO98T6RplzIm2n4hhpRjy49QwmL8hKn+sZBGnEBX9VNmlPD1/y1V4rRqyKn1wwqMpu0HCLIJ6FoDgNV3rD9IKDHiO0nHylXjm70xRHhbO5aHYnumYmT60P4GifGvJ
+DjBTAgm6j8gsg96dezttQJyOCGXpWzt7LHiGDLrh6vMss6QQvS+JMX/TzSanTcsuUA6nuXBnTkF4OG1dIoMOytAEEG3Vny0qpsECm/wpTLvYdI5CzuQ7ydqcYgJjgFA6HZTQJe2GnpAcK1eSBGj6TaTuokQ6cf5uDSpRrAXRvrrzQeb6EeS+
+wtKx7DyGazaaM8YwnQTIJ8g3hvGPF9p/R8PIJZtQ2AKPNw0lZ1L2lxzS7z2fY3UELDDCYMtClCVomJkHBkCQxP8Nel6GLEjANY0W2OByQdfI79drRbV5fehXY2bCrH4dRb3HpnDeCx4lYRXx4AeqE+3VlUnn2aLhO+EEsTc3KEWOoQSRrIYM
+Bvl/TSCNCRPDLVryRZaIZCLfSvA0yUYcR9LC/dOEuZI0DdfeuAfHC5mNnQ9Uq3wy5/4kziWDR6utYqFYEvFs7LqBw4lofniBYX7ZzA016JtIS+RlW7GyLY+NGs+G/K+xfinbhFkzkRvt0n18lx+L8mtZ21lT2Dwak4s2kD5RoPxQm1ICxeyB
+4FkZf4lR97we6iGNQzeqJ/9xP+Nhf/7Qy/xtAsXgn0Aimg0iMP8tpPBettT5fWhyRp4TTFr7i1Yn/lcLVwc8qneVVYaSOYvIdv32vhBoeLhTzMi4FqlJliUNSO84LUfjkYdHC2Fjy2y097ujgj4rngHC3cbZAN++aa8aExS6MUN2A69cBER1
+TfkAVLbPXSDroI1ZSN+n9LLJLXApPTFnwkhe+MGDeYej8spx4XS9vvwTIwhPZLGGzpLdXbSrAXc6ytNZnhULSOB8zzd/Rp9kPNr/ZjsLWQdBM9DbVDk9I9KE8/VLjFf7VpDdCUIAzDhIyTWm/dI3wzAfZqCvGuhCxJqLGixVRrWm/RZWe5Ni
+L3P6kFA2IccIFXsDL+9AlOwvflLuPYRjyGFxt8CpZLAr4Qk5Jy1OXHI8MCIRxWQ+1HyezKR7d5lVoEiNDP7ruqxJ7e+q2QVjtsy4PGS/+ZdAmhum4Vd1ykYn4nM6GcjyOD7wyqCIkcwjWYmNeSXvNmVB+aTiFFzPum7KLPmeMrsFyExOuXRQ
+iNw6mCJ4z2GBv5+BaSFLIpF1sEkJUxF8L+RO6oh3Lkvt8SMEsu1+9uKXgItnhDtdr3v0ayj3eRgRsgXsk+isVTgKrC6pVXbSCc4LUwbnz87nouX59H0yckxZL/NtJ5HkDtz9ocd3VjRl6gZd3Rid4CE1qt5U2TiX5FUcLg83z/jAH8qvdfMi
+beJcOyy2Ux+TXaMI7deCu4IStBQNqc10MidsqZIgWILCnHzITsBkECzCULP/MFlClb1FZqxiiXGoI4hQxboYEJS0ToYKNFn9Ytv4ZW7X2yhXGn9YQ1/Q2AQqoOTdPIXra1s+0HtdaD0hZRAQmbigP0YWGGeZkgxYIGuPPbvSysO0jnpjQsEr
+BnV4LuQ+rZx/qQh4PMiXzbeDeCmxQde2cA9RBUob+d/eD92FtzaSmNU7cRs56AQ+AagyiLG9dga7CC/8R7Ta4F12io69HGipff6GvaniLn9yoyUPemA5uWaSlGyyV9+wba/w8Yr6y4aGALXxoizcvhEy0xM9UCtdlk/uB9e/2VD+E68iQHFh
+ooupl74g1eGnldqpAvDBgTBNpcI7mAB0oq77EJmAdb2QyfF8Xy0xiPk2C84Wboa29o6+lQ6qZIdFS04j7iRLawg1DyHyEYty0wzYNljup3jrjpzogJGQez8EINNvyvRN8iCULatEDa01IFKfn82HcEHIdOZ7lWJ43QVEo6x2wOKA3UZgyx4j
+BIHiCD0jCVGZ47rfgxZ6bpXQ24ez09Pdsq/IcivOmucnQWRtmTKM4s2L1USrOXwCHqjMT5Kgvu06h0yax/qNatRkml0Pd0sMUhO5PyDbyQfViz7Bpo+K0f3WIOC1vDxijlenn7h3s1nWD7+HGlCFWPmHE+ZrOUpR0q/Qe7XUwB0Pv/wn2S4P
+A8t1mOD4zdjlU9psyAz2OI7pBp+jlAUN5d3I41cXOTp4utsUETll/XmpLi4tZa61OxJE9byAgKm4m2XktMarBFG0KJ3CS6qpT4lGBnXFgXCGsCx0OTzjV9VOl3hECg+6f99FOnpHKPtd+4FtkrYfR73Mr+JdXDvqzs6BcSZwxtIxdCezmZ/J
+zR7t2NvcsSCAvFI9gtAiVgX6vhrIeGoCN/TRmnYfZXMVt+YQSrWC652naCak4hbepjzMclNeajBjsoxmz9X/rnPzDDi2NAlmTcxeTtPfprhmKJ0imcRSgtB6JrX3y5MeozxwnpcFHtsjuXz7LXeMVFajbIDPCMQe1huUEBWRftOaoIjQqMcg
+8KcELsVzDpdtGyutRkQ0bJooA8qWMfYtW82mVIyymso4qVvtm49Pb/itb8e16NLn7vxRyXSNk9Pvfh2Mf600xYjpvZDCO9oLxyPTSMaf2ItiOplMLi5abHKpxQeWfBDDlIcPna4M5X8ch/3g3EUSmR0JRUm/m131L7UPGGTRWRm7beCvC8eo
++Dorjd/C7uRX2s/ZYlm/Pi6714ZJ1h6XQD+kteA2KYjGfPvdcdiZ0ThIHblpjPwzqgz2hpF+Vnc2StQe3Zmpie7rWqsKX5MUB+z7iL5do7qXrDdjnV4zCEW+lcHB6c6pUuKjI764v8r5j/7/tIngSzIAUYl1Q6gGsMPykFBvDKqf8ro1WxzZ
+fEXOasn0NVsH4MwIJwVvmJdIOMEoZLPO7mKz572xFaVbvisuSJcnHW6jW9Y4WMTBdSqybfEb3f1t2gjRFYJebCKav8XujLT3GZktAPh7yeGfQxxX3PWE2KJIbxGSOghdm5aEUSyXcuw5tn8UhB7SstXCr1BJ5o1MLVJP6PizWAKWNKrDQHoy
+7vBs8fN9t4426mxCpyZ0pIPxW9lLr6W+jIfQ+RpCsmCA/crafAUli7iWIV5x8mawsxj6kthFU8/Y+ng6S3o0JGq5x5RrPWArzRpX+WFh7/DiKVYZG80z0pnAvph6lDa5g4+aE7THUAYpEmOylPJ2XA49p+Jr3/d44unaK2L+l2+KGbaBWsmx
+7uA3laUIAQwcGkd85TPHtwg77o5LmMqz5b3vxHR2A6joaYOnCXpLSeLlZ5yOAO6i0hy3w+uDI/f+Kmjt/xLTv/r96VCfPyPHXDC6pw1ChTmehVAQPKX5ijCMlnrnmphGytXA/xdK7QLvrfhv3SlfBDOWxcchFPoBF0cQGIt5g72MC3KnAJfa
+ggzgcfomyAKGMprtyKApyxmeufXRY2Wb/shY38kc/FaDZLT/lMyLfnb+QbRtyPvqm96Ega5ehddoIwECgH5kNzvfse1fTtVmQEnrrUVkRCZpMnKA5N65fJmx0MX5ElmQkl4+NVIMucG7/kFdCdG2Qt2halz2KCpSa9yFzriWNY+KfsnGt4l8
+O8w3qpLytpMxjORrwDHJwxCJoVL8qpDaZc5ZeONb1vw0B6DdK96CpyhGQJHGaaSl0xhEE684wbNZcZbVD8ueRkm/OREb+Z6N5hTQfBvoEBLIQRCORwL0eww/YKYfN9Vo0OdYruvH+AUwdDYnWBjSAM/csWobpbzaxTn62gWlribI57QtrDXr
+ghZFdta+8LL3fWnMds3Pp11RA2NfCtYO+reE2Rf1TVXGcFgxJ+wBMPOAOX7T7sMWYKpiUsozoOO2Hew69rR8Fb/Y51MTPDv1UXka6E0ehWXewUwQn4p5RQyYJPIkDEgBzFQnFz3DlkkJ0f/OnQiQRwMWcdYwvLuPAUwLg8aCc0qcKYLw7FyY
+BZp2HkvH5eoW2TnWck49KjF23t0wZmmNp/wdDqMojab2/JoCX2NLVPGFkF8nRrsjxkVATGbb84YeVLn5jmXgEvVculivjybar39CxioaOwP/kX9uQ4V3XFhLelDiSJv2Nl1NghEb6sHB7bw0ikJs42DB73sVuOAV9fhpyE4P8PM3nAFeQ06a
+6EtYxv8O/2hGnv+zNrkcGxkrA69RurwUSKewc6lRS8/UaGf1UflT5qjEwZhe3eEhMbnHNW3wNTnxrgpq9A+OV4snFdIU2DdncsaSK/Q8szTRLTGxd3xcsShEvvNb7pQ5ATEEVNo5Ya91DXop5SGet04gBkrJ/mu0myCYxwMF3DOSqLKMcsFx
+PUrEI1I+iZkk7Okz4I5Sc9SYXN1lj4dY8BgIPAvJZoHHKZpHOTMpeONcYTdaXy9beXhAubcYR8VA1yCnWSIQcvUfmsQGbrKSrxQn95Cz4Pi1CTLw2WEmT5ci7owQin4u1qtR7j2BLFpXyUdIGiIw9OwF2N74ok1nGXs0DJAz3XoMq6JzWiu0
+rlcp6DgAm6HKwSs8VKSeB4KnBiz6+Gacn8Ttr7VKVHNGOaCGHc6PuR1iUCVwdJfXAxQ+7t2ux9bAVqWVGyuH1s9MYWMibcf5xccCA4mCzektAHMG9LKUDfegXnriRNQRygBZagIe1yyBue/4680p6lse94aVNRFxMX7f1VCHeHLSiQ7tR/0d
+r7Zk4bb7MMT02JIjLs8FZfXcvuxv8gWuXp6cscnlyrOXoWrqJEud2k1Yg+9v0alwQEDeWGOa+ZfNzIv6tsbjKVDdJk9pxwnK9huny7v0XuOLlbzFq3//Abm2MmA6NR2Wx9Fv16RgqCwpqB9S61OZMux4dk0ZFh4Rmz4GigLB65NcfCZAZcmK
+vUZvPJukpI5v2hhUGw0GAWlE+67gz7KFkhx80sc9G1EUmvfTci+1ZLVwBp2/X6phMMEfgKoAVEBdgA/ZYaHgnHdBCZEG+dRdq8RyF8CcH3MPs8DjewcmrZYjoWzBNed4qj5MCix27EHnFnvsuWgbkLgX+q3gVXQeVQnr6FeP0+n67l8FBJ8y
+jGD9TBhEBmyqqKYO5h1C1LxZfY4mzd0spdymmub+06j3AFY+iPI+5+/Zn8rNKmM7JqX6vEtzuhwQUbEA9N667Xy9hOaSC3JZL+b0Wlk5IUMd816DRIoOGp1nV2CSjsJQNXrRcfbPCvwuempENf+d0wPn2AmRfosoYk+95u+5XAuDS8ZjYpWH
+whakpkYLQh5I249s7rxUSJcjRT6eXF50z1G0mI/2CGjNZDcR4AK3A+JRugyEbJtH9nduomZNo/jBT/obTcqwf1VupaWup82Dik2SkE6RYLHUfIEerkiOPwdX77/7CAukPqNItXHakyYPU2knosvXYyBsg6vkKWczT9Zif0Y/jR4vrCb6LSCC
+mjFXFWYIB/RUv9plS+8iRbO2XuKKBdthIgytUBUhox3kkx28XT4korMvhAP9qWjBpwbqUf5RaWOZBS/647jTgk37h/iLasAgQqxo4aSsn7Pn7sXIc7f+1zR4xdws6qBDJujKrIU7CxE5/st0g13R1DWsIRoh75kpBQzNgNoIhb+/UM3n0u8+
+WJHqNUly/vVm+W9Z6lxd8/i+sqLzZtwMZBK1NngMYwahdHyDiAWtgjzJcl/N3XYjC7C+fuW3KFeBs6jVTecvRE5B6GhMJdZCJrGcTliG0WjKttSnIdE3wdtotNpWIHzYSLXiqMLOP36AnAVwthFxZ1c+d3HcrOU3rSvtYOS//7cha0TOe4KF
+uQmhoI6zKhXdZdGilxLfpYW37+4vE2EUsaubbkT4N+hmghkDje2JFvfuuWwlPYdj3pF0JsBcdKxRyw5xMsPh9XNuhouvYcLhRwx84W6rG+UoKeA05R6lC54HPCG3KM+1wAX9Ca9w6J+TmoJkgEq7wxOzvjuZ7k/lizafZY6LW+BNLcNyKrH0
+LvwV3nEs88Dn0NqQNM1MuIKinIyOihDZ9L0LcXuQ04bOej3kK3r0zUO8ZHQ9MrPbjZL8zZ3PQpcqOiY4wahZiuwjVCO28L/6uvUNOXFn8/eC8uljEveM6aqGL4lSgIyXluQZeOSKOj+daW7KOqoVgys6PcnCUhQlMd5chtzoVj0Rq0MXQKDG
+OYk5tRw0wUsYCVYz2mbEA5A0cXnlULDtqskt4qdzQuoy9CNMoLOQoFYo8ugK7JK1hZt55maqjRihPXE8171wiIfdJBbp4NN2CI2wFTuHs01mtsBn5tH5Eh8VtkKK1maq0w84SAQU11glS2/u0+IevJYAjd0EZXqI9BTwJ/1pYMMnjqUBIgCW
+aSKjRtv394JgdHtBzoIREFgHnhtfHwVz2X5ZDmv/IeLjum5K/d1fA8igRPRWLN8uImqY0nw3IBBbMEyY1AEvKHwTaNj6XBgGHibLtvi7HLCIJM+mYygMXu7L71H4OBAn/i/74zbCO9IuUIOWCNif/9rp34tyaJbgYpWymg/uimHgAourlVXL
+pUNDXhrZJ+dsDoXkXkQzf7P6m9qkXlAAfrGF1JWOXRxHSo7x9OsFXmbrevUf69+QLdZyOqqPZKENTqbNAccXavDj43msUYUsn5fTF3YyYLm18ovJWD9EpBTss+MZKkpkyQzuaBRzMdmH88OWy9PFlQwuPwEwG+UdvM3ZmdRVj1qWW+y2KTS/
+SDJ8QKEBNgaZ02VvlUKsf1LTQdErwmGnN/jC2b8BFqPjEjHPY9ad0ZX+5SQ2ndttM5NNAgtITpeAfavHk3M3CWtImYDISzP/Z1u9D+WLNzVcOEHUbTJYvf++fbKfmu1Qmup5swMOS5EawWAoABDunym6R17jWbRe6djd3gRO/uqANEEgt2EM
+IIV9b1HPLq8n4iLzDoyhJ1L9fhecl+YzGpO0PgsB4SGqJiyeNCc+oomKyk68iMvQJwBiH1wTDdlEf9OmgY9GjMFkTgCdyq/K2nlITo3iwS2BsgHSmApSBEbkMQCfZIRKcjcuv9xHHn47pwmwWhtsP1MOPG+u70Gw4+YNcdPMKMXYmpK91YhY
+K+bRoWDLfm7wknEKH6YfL2Qe8dsjRW6E6NDha3+o4c9rSJv2D0Ik/6xOyFS/jI62aV4M9B9Bm3OiT8dF9Ng3SL232PED/GGeuNjeQa5/PG1aEldLIPHd8Q/TWz3HhUuYLh2pQ8CGPI45OgeXCRxqHH7F5a+npL/l9nOgt2UhpZeHwk3LB+S4
+UHxbbJL4ucsvbd3tpphLZNIkLS80Z3m7yVYVyhljsA8BqrYNgSrsRIiNwCdKD7PvXPFhuMbyl3L8cSCjBl5xiE01SWrgrPuoM4gm3YJhvBe9CH5HzITUjfbl+46M+tunvs6d7yi/O2UB3Z2kFDkAoB5pkpK5+Fqp9MB5e+wgAkpVzSCH+q5y
+dBduLW6ZOTkzblmqOed16Kzdun+JeU0hBG3GshiPyC1aFSqEGRQV9c5aC1b6ZBmTALibqwPc8iajx44BO7mg+PB80XAmTqnYxo+8pBfkRqNhlFMwIfDG5BHGKbVylAOTtPkFEGl884+msnIb6+v3R34Hqd0cvwaKWbj0VMHWR+m4AXoyhnjl
+adQN72kYXn+2K0fCCmdl7qwIQ5lvR8NT74iShg9PLLfWnW+QDRA4Ulg2tkjihfXMEXplHjNQyo0ojxTqqdVtYQ+lgUc99zGvDk+A4NfjwZm8OCTfEo6NNfJknsy5GhW14+t7zEypLFUPUgcG6eyxpRNsDEGHKAW2FjDHGA1SaTRg6CjumKbF
+pkAdsvVIFh41wo7PS8zC8EEurLCxwqMlcTnpvBG6WgQHY5/dR71X9QpFISjayc/F77brj5FuanlcExgrd40ldRWnXKgBGc0gT3/Z4PyFiNrbCSzLStyHS2v7H3805d8zr9t+4Um6fdT5z4AWtL0dXtJPdRFS7jpav65keEswhBzKgqwVDqNB
+555eGvp4aY3soRf6fsMlWL92GROcPIPwQw5OVwxCQz9oXeNBHWdsuwpIQMU2Pj3qkArAySPCTnQVtfhL7u5vFdfNAw5xPzwhoIZrM8IOFitYfrA5CrTc7B9U7FmP7oNGIGi/T+YaAq8o2Zah9LcrpBlvNmV1Fb0wlmqG/t5Cf+4MmQ+Q7ge9
+OiLPMQaT2IufvSBt8X3mfBQVMhVP0l+GD5j02NzbUbsm9MRCMMaVQu1UZPa1Ji8/QBZCEJAgiPpzs3hK/KDWe6BRFRCreqJo6/zhLVx7i8b/DE9auoTLZ6/uHr34KOPapM/B94S9RAT/tKxQk/32y368K4whEGx8ggcSf2ahDYCFJNWwW2tj
+yx+fJZ13MrIDVGS/gOpoatWKvITjjU8EGdh3H91DI+4ngD0Punln55UXfJD3fhQShMP9bJgRX7UcYWjXnTV0kaG99Xw2W3dVH6GkDOapBQ5slWMyDg4P7jMxAAGumBJ+KN0ivtuPHKqeohPe+5z2lNBLISC+XhYO6f05+EyBowwygFyliNWh
+WMnBbu8PWgy6v1h3lWXAjTaK4dQreOdF+UtfjnqUFjWPlQh85aKL+FVDk0uwizRtkOhTYyJu0sZhUE6wbLpmaNveXP/A7BE7NpXL9iTgp74ziY1Yxyu3X4c+hX/Eobwpg4QeIP2uvRTbzhfmqkpF3py8Jnsqec0QN7AJj3DNtCYrTyUGayWV
+vNDOemAkfG6tYjRzxtOlW3Ks0rORPHlkrEqJYDWJSGqQQnlshM5sMZ7H6jQut+3SoMzYvj4eZ4q6wCtTsDQ9TUckNIvrlqJf0/3HbGyi26eqcBHQ0ES1omPYEU2ROL2vunBU1Iw2zek24iz/6JHS9XkJwScbfoqHQAiJX6iXkOnNKPQPKlq8
+pf4De7T/44XuDyfEsQu/A0NXj1n8JoSWEZTityhwmKk3j9RiFSBTxEoWTQ30rNJ91jQ2mFaq5sAnbO+0vqXOh4Ve4PIjA/PiKxWARosHY7ku36lcAxi4M96REcQHE1uSBXi2Yyy1VMl/CQC97rSg+zTlxOr1SdhuTSxyREz8BzbmzWaNHN9/
+0MMNWV/OG+Z/xT2yilTxepNUVNcnMNvSGwK1zeK/4jPIYrXPUaw0Rwt4V0ylTKard3Z+iKFFSYkO0c4C7+THzs8zTAXKcqHJG9hNm1TrGZ6+dN8MLj9xc2EbOj/80sT+tBKcl6rPdW5P1c5lkrFz1na/X/jykzxFW8poSoe/KodX55Jf3o+0
+2exdn2/7xYWRqlB1Gzi7ZgO10JNL3ndXzj32o/KXj6qyz23IhqcrZk0C1mXK5Yu7+HITAeyeL7V0tLLdrqwFYXkjSViLEez1FLfF4BHqYZd8Qeme8lKj7epG2mZ1V/Wjy8Sx0JO9YibqP3YCVX+94yJFDIdN6esEkgB1vKeEE5tQHF1/HA3Z
+x0rqiwa5MObLeSrLpYcmrhd527sCVvDYOPe5i38ag6IZfMSOgvU8+7B2nZEp8I35Ef+iRa86AXf48wy6SfzeEE+4WrPE8DVIUuUOyEVM8mq5Sxaiy0nxhppHUY/sGov5uE8Ueusc9sYakFxoYHHrh72susQUdGUG2QtWJnxbgUNbkZjijYLj
+8C5/qJ/v92ltK3lMX6a8WQlUUbAFIq5UIPfwYFxL95vwECs8o5+hcilUlf2OyJja2ulXlQTq4D/eaFJZ9gm5cuTpZ575nWKrW9pe5aFsPpj5wxOFFn5nioCqutQSdKf4jLM75QeRNzKdwa0A6XATRT89iJuQIi1dCYiRto9CuT+B2ayDbbKB
+Q5eFNGfjbAlj9Qo8I/lZ7NjNnFsAMPxzXoF4Hh83wB/kMR1mRyTs+pSHIZw1bWmaUiy0tM/MtT7+JZWtWExOT+Z3PrCREQiBc/+SUosTR84ADg5GODYz8SkTPJPHHfPgZuKtTwAFgPIeJg34MEcODk8SDQNHLf/aFQL7O/1o+qVZgTJP3wm8
+fI9PQOPiQ4EAzdIS01ppsj97/p2l82E0ky8KSFUFJ74R95XUC0oI4SX+C2SNosOnG7cLPwJGYaF/S6daZrEtHbQMQWO1/3CNX2eFZw6seL+C3maaehqL/aRb7XHAeFWGfAL7tbPbzlxDiLD9N3wUzu6CYzu+hSwyaN5mqX+u9SQZUxAqUYFG
+c5mWnKfLimVju4FxnC0CAgYr9TrKOo5/ibgfMbh0YSepY1DbWlvm2MeTnum/MX7bGloV4hUFo7YCE9I00vI8FWNE9sJg77zoieefQFf83JMoNLYonHECW1OjLbFhk9C8nTdPqQplpD6QDNy8HQrSFQmkvoWmQ3Z3+zBFuxDX5P/+GWtfxYgY
+jXvENqysCwul1CfOSDHADtu3/GbPuEgxFlEWg4zcbf19gbqdhVxF+xLyHhzd5k09RJOlecRsA0nfUGmGI85DcGUDPRT2W/E+kDrqoxWSi77QbqbjXSqjePqzXmQMbJbZ4rDvRv+3C4uU51mlKQ2zVcQHm0/+fgFVTtAw2m9lpKjmWA2tvBtg
+qho+SNJ00tbaXAIdOwJ8qiIGZxpuRPNVTdQr4SiRP5wvpMaPv9uYt2TN9cwJWlxFV9thDptMdHYdfIrz1ZgjFHL/jO46j875y0Ky7MkwC0ueX8hZpNSIk4YdDwN+mUjenoaYP86WDDeODKOfvh8rwOjvYwpqim1AFYHYK10S6pgnReyJelVn
+CGEyMM9JHi6tCuRHCOA3mkaVlUz/g0+CC/RBVsiFHhJqMPquKBa0Plh71ECbuu2ff2b9FWQkiuR4FT6dPcmxfRddhBc6GOjCL6Ln+6ptTXf0rLKdPK6yp7UnVjfxYSDj9qU6kwMVkMkdSP6x6LuWPR2wwCyJt9FWXjJ4evwhscjXUHtev6KU
+LlSsz6nNy52k+yQrLa+rfolJh6wKNJXKlpSmrDpBRceSRs9yeJJCEIdjgrxnGRvgb1OAcJdpgbqfxsZ3oNnv+X+jxI2Y3m0jNI1tkPU6Ef1D+J6xYq4XmPSKKRzdl1zaSnvLBNpQJRgMlfAiPt0mre+1sgxu/bW9smHckOXAtLlpB1taLy9E
+FV2I88zCgmlM/TITox/VVJe/wuKDvHXtu24hKPtbLJBIa9tVzqwISFF6Hau2UJNfuNQGvQyJxDZ3/sU3xZfcyVQqpLk3hVP+78+25bSQlFr48pFZy3XFTfQE1xhxmEYBDC2UlmovAwJMy3GBxI48T/rCgxUMjjRDtbSgZCWXn4c4TRdOmI3/
+mGicDO7ffKhCNx0zLuhFpk+4pN1p3qKFATza12NWsso87aAno3s8Gj4tXoCsv8LDO93vFW+VfBhyqPXySbrk5BOklTWtpyM6xhu3o6011WXxyei+RBahrAhC3dyE9elQdTNOTabvnZpmfeOeMHxMtCTytSnzeA7207XhgC3c3jdMrkpRH4jc
+aL757Z8A7B8fVyVra4rt02UxsMflppRd8xD3YSqMFBd7O6aloUp7bfBR4Pru0UABmyoEaOH0e/TAefwJV3RIIsUDf3UDcTAf1KfdeSUmHytqi7k70OjKk7NiXdQmk8rCN/r8iAL9GAMw5eJfBk0YliZwOpXXgU5ifmAFzyIv7jp/fu8H5n9M
+CawYtFY6WPu53+/qVd5RpjS5PZKnBuKnV4bUM754jFFy+mPZx8VH/rQSCrCDYZj7W8y56r+OhgOCxsrahM2cTybK6+/BIYmgC1lxl9FNeRFMxG6FnyQtT49AoXVj/NU9uLx+d1GlqcatO1AjBuAblAJ72nRtwTW4jAaUYzdp9Ksm75rAWV8d
+uMD1wbnXhTkXZ0J6LTK1CN8C96RUYuWn6lXwVOu+AsbKsoSNq47L+0wD9VbgQY9ZyxDXsHvv/Cvc7ehlHML/IH+H8MOnuGxvBjLvEDxCAFZ7gSL047Xn0RvN4BfXEUfZxP6PqBoKaDlmaJ8i06g8oMQ0y+jqGdEKrVT+h9ssDYP+Pj2p9aDm
+tczJxaP3aXeUDpDcM9v5dc3M3L7QpkdnW8HQEoo56YOkYzTJQbFu5X2RqH+y/+8ga+E5oXtY/J2LD8K9stF0VDqPZOQvbkRfoVBJRyGnoD47m53oAsxRSmDUz+jvSYW9IX1jlSgY5Gtu3k28v1stOGJdATBGqn4w9cK7qhx7zPMyUvYgm7Na
+S+18isfNxf7vMoJWv1UNrLMmrrDFybH6O7aoy9/b270c8YlsoqV5ez6arXSOEXb3MvEqJ3xYMXqoHGyb75T5ksRzbetAN672Gdff1bYRH+HFfyg2Mx/IkmYwPl8tGqE5b0nVJDviRnmblfk4P0K9rydDDtdghN1akQD10p0vyrQCl29JBm31
+61Q/7LeVvisreyM/Y6K46IdpiqDEMayM5QshU12ADmE9TM336sBEUP+TY7D1uiCyk3xJH9RuSvVCp8SnSyNUJ0hrXrn625f6QqtJFUVjWc+5G8Km9KO/kYK5ryD97P1chZqOo5PdQ28Naqhjz7anlyspnpMG+Wi1bX6nsDQZm8Zn/M4+65VQ
+XoFmYhUUIN+ajvrbzV2wHV4IuslXntTkUVXWjS8ilX2F7wqvFv8Hr1JJL1bN4qWg9pFoEnUdz8GMaXpPTrR+ym/LSf1Gw1wZk66hc3od3ahc2mX+nlDf6NG4G97u8p0o3aZil/tsjEIR2HLxZTKhyvT3fJLabXGULSvP+TnkmzG1ZynIC9HK
+IZfEicci4BJ4eD+jm6rDZta5vkTYwo+KYqs6BGX7J8koo3ZdrKdXIBmMiRkoS4a03LguUqQHTq6OuAUXb1vFBKKGT6bJVpNQYqd0qw6IGhOBQB67p5w/s4Nt4Xkdl0v7uY6/LtoQfHK88wDqGqyXgAA/mQ/i6t01JxKXBv85LH6tw+MCXUPo
+4oWin44dmfvRmegAhW3Hb4jkfDKk0iIKCRh1VNoiqkc6Ewwam3+hqeMsTy/jQSIz8I5TDoHBphK7k5TGXr7tTUQN/6yflaGBhsusqLB5sjG845aIK6wm8RLuCSOVAhZBpCF/DCyFzBBzuYY4A12YBgJ/zoR9CLn5Kt3vSeAQSuu/sZ8oEmY4
+I7Zsdbx7p152/x16I99lKr15xXapTMnnYhpIl6FFC8sAACWD9Kfhj3kvxLaq08dk9YVJXKh2luQBnE+dtYbhJkcSbWmWCjDX4rIJIYKAmJKB3BIqnJPS0d8P9fo7eOD2jHXLhDW27sPFLH4RXft2XlMq/VDJ76uHnhtxD1al1Q9XKRiRwj1K
+rn3faebdYMAKUvKetbRyQqNfTl3A75r93RBzrZXoQOJQwwc5aYECf7vk5oF1RUtMDUMd3TISflJs9mVaD2ReKlNAMYh8jVyz8B61GMXigZte4QPPERCmzZuA4PWgUAm59CuZyaGv6WECSZ/MkzZCbnGzfNEax7eCCsjzi4sSM/uAypPwp3rU
+gaLjnSazCgIv+RRWymLwe19uV+ROVEhhXLoqvm5DF8oHmu1/E/+sAUDBgfm318zjQeNXxlXmY2TzvGjS1+ZdGozGukA8yhP97UuqRre6U3lry95pmkC+Ccuooh9zJxRCacn2EptYHxliPT3SWEsoMcHhBKHdmzM88ODi6OY2FbfajvUDqOLW
+8eTkkKy5oJ/AZKypxHfUG2ekU3FAjeepb+wDzcVAxK5+ojym9DJf3FTpr6lTZWk3UOLNA3Se2hUnfmUovaI6GUcO2tAumgL7l6YrPRXsJNhiIVl1S+GMhZwABONxuHslHvOps0rU1FidcxLmaFElTivCS5+Kl5gDfXmbPavoRFgAS+GytnBe
+7LFBtgGQop8o/4Xx9q7EPxXwKojZ2128rfQSc+L/ksooRaBElbPasdeZ/EOoIuJuzi+YJ1/LIjpe9P03jPxBtgqGE/d41z5O9FoFoM5UJVi4u0TxPLoH+c4nNvMQBUtZXOiVRfeo+rhBt0DQcjSrNpBL12jQTXBe39ShTV724lNMChO9VHCG
+AYm7N2tHcLzoTBv6+EXKmrdGa7OrI/bEwEi6mDyOqlxJtlzyUbd7U9wWiUKc1CFINykMK2Baro7TdfhKTfFiJki/3YbTZKBR//mEsym8QGekef3BHgVVYnC3agAo/ytCH6uRzslJTqP2dlIs0I148TgG3cHs/1aDpWrTi6itgXUHIr86Tin+
+oSAvAbus1bk9w2qRsU2jgrNK3AQZzvA9CTCsCMZAkWLAABze9W7xir7Kd7YbJ655tvHXCmHu7kBIhN2awlBSxfvC73oCBNzrS9gTo1KQRSIFPISeMdDjOeMdoADkh5YZORpC1bFLfDxEFIQt7RNm3ONMLQH5QCu/oaIDnwcTFXtIWbom5eFu
+Wbgu42i154C98iQf3FmEl+OUHbQ9bQpH9VhRmSosJuepmBGfyjOeypLWKv92gohIk3umfJOU//HO84kknnFm8LA758fwtMWXLIAV0rcoPrj5fy0hPvWMqj8aD1Jbw5wwAzke1LLSLy3uMoyHe1E1asCo8Qwl5VI9xgwx86+650URrXAv06vk
+PG326Ew3DpMEIFf8CBCickwbx++NwQP0LNtwK0Ztnoz2ny6USJvpm4VLUv5iPEUZY4N4gtnFH/HucloriHxSJVAQcf9/kKIgSNJSa9zNCupQBFddnu8RSTeo2R/MTNri0pdOWn91roS+aB+IEnqNlhRvw28Fj++VmpfTSa/oXb4DcaHCQFR5
+qHz/Kr9j6n4OONDt1gYWTs9IDT6hfcWQwXTTtc6ghEx405XwiQ3eeu15ZAx+M7dO3bLPXht64epo9waR5Qrey8yl8hi/8Ml9P6KxLZJFvP4QMVRK9aqj9JyhwSxaBNIKbtwXWSXi6xdDWf7asQVW/JJ+ECfLSHGk7dwl0PsB3xtx/8y8bBsZ
+182wO+9fFh4NjsLtTuxWwQPoEWoicJ17VKDo3nrdG/n6q3LZG2FBTemJVu5+QXc3F1hiv/qU2UaB1jFOlFQsBOrabUDSKAAl8oopao1WtNO0oRVEukNQybWv0ZVAPVEdMKTtdiMoHL83qxf+Hz1T6qc30nYyfnrrxFNYYvSDKKspl1pK5zTN
+NFCZRUjsMXBQiPBjYNm8QNcaZPHa1ggSYJD3Rsb1KY96tPl2hBtkP2ePYAoIDql/t7zztufOFcrNbtBjykiRKSc78lStMn/u6gzGMdV/nExtvP2878wD7j/IwLrrKA9pU426sgPwGuuzMmmMsPRURlzK3s8jI+Ui44ztI0MfwKrHAqZk13qZ
+cAzihgoFoEoS/5MtODPI03yUhcBwRFcbrVEKiaNIQz7jr7VQcYX4oARN7cCzYRX9BStDM/LEcF3zfsJivK4bLuKXYvrwsRtxdl2neK0CMzRFtScr8lbg3PSAXEYc6HVBFFjPTGGbLWaUjwtvctcdj7xSSCO9qv/eXYA1fKQMhjxrhavLZDLt
+hnViwhq0+f9RoSxNh5taLhEClKp75dOgGi6CDQGGsagoOzhfu/5Oti423Jp3YBpw8sESXW9B1d1mL9fCdRzoc6sYf9IEIxtq/mDzmx1XmwLn7WZwrHzR6YaKLwSytZjYdvNcMH5OZLMyuQXENUgNHr0BQge8G+qwEtEB8y+kNbrutgpt7vG7
+I2yowooNwqiIhTDMPsnwUMFjXowY5dyO89ZDLktajG7YcVxRKvwDxbU7mwaiHlGngP/d0uuUnlsy53FXlBqrCiilRS1qoEJxlBgjd2GhxUoyVrIeO5PiS0Ckar+wXmzZzcBj0CtciQZQ6oYZ+mN4dKy8e14HiUcIe5i8eIRFCtaaJgYt3D0t
+ogfiFRm10YAD5Kts4LrvvOQ/lq2FcYF8IyFrS0VaA1aYcLPTG/AK7CN+ug8+2lWx74xC5rNXj26xgRVPB+1cckVG7AhQq6DeJEgRs4v1fuQtYOAI9NfszPJghhwbfCEiH9tkQveC3TTFMu47hG6GX2PjVfMHowrGRENnEYtDnk7pSHEA9HgY
+Gj2agKVv1flaQKxkZi1pqIcc5iICQc53S4Dh+m1KTAkbIosv75sJ4sotlssU4mYCG15KrTTyL/A4TW9D9uIRCqg8ux0z9vKhSDmi21axC2deg3i3gGw+XaHp0JfQ/hza4kNCQwnHWdOZC+DjkZvp1TcxekPsIkJUdvuqAzUcvruoT9Sb03cK
+Gnd7Ze4kcOE1x/Sa0qVC6zyqxVB011Lv5CvNtFvlWYYLN8S9y7VMuwWaWWuELlaFxRYTyKmNzQpl7sXZlmkJukGc3syLCNFsGas9D7pjMwFcE8MvBg3Trv6kOWSSQ3QFFiI76e4ICdN4JlnU5Fd3kCffVZwPZTsfGMyUp7JsGg5rNg6nEYb3
+nU+n7ZCHaGK7XHPpXwIwqsfDFaYcroTapJl9qMVZzz0ZzlVIoyVsiCoQJn7ilbOm7uYb7C7hdTn4i4GVRWCHDLUwk6+lE3RVtFQQxy5aoOJ6UlF3OzcLbdCgofbgts95GCZy6/e82LlxhLC5FiX5+Tvq1v2kzxdE6FojULHOFcO01etRY14n
+gq/hfJroJMkwBmQ3A2wKibhez08W0Tpi8nu0MJTTgKhPaASbdzPry9VFKffi8ww2fzbglwzcneqxDQwqfd9pV1PXK8zhrmA5vbaTJf64FNyhTFZVs8/Jz96Bj8/emhQxAgUFkS1IR/9K6BXwHY0a4NG/HqUFtajV5ftos0avjAQzUn6OviPs
+VJfksld62nd2Sy2Wlgu7YroShUfqCCkMh2OsYVfmhqyathpYAFGL+aRrkh8CeHbMOmnCa/9jMT/a5q2jOPbQoAkUXBKZ8Shc9txV9fukSduWO4zLpE7nNUHw8kB7teqVVzXKJvxMe98jPnJDEO5mbMKz/gqhUyK4FoidOHvLlAxoT1+RDb9l
+pfTunDvy0+S0k76Pi7jSlD3OyAbLGjS2Je/6VOunksp5CIqImmjsWJWJApdRWvxZ/3SDpybhA3745hiF9THhn4o+M+BK/kz0TMemI+u0Vozv7yMpmpTsEf3bLEzc7LrXwxmEeoF1Ujj2SYmlzIV7g+CPp8XD3SM9IsVoqlA3CdrJX7GqwUrc
+kLfa6Nyh0UBqSwWHSkt1X+9XQa17/hPTS7Wx/cozTZ0xjD12e45oQS2PTaifUEaFEsAitP0g9HDgB/sIud1t1Hgj+dY4KOFewBHjmv3sw75P8pGw7wTq4MEyf2XuzLXFEp+5s38lU4YOnJhvBcWaSMsoWOS+5OlNw+2TcUbfp+4m7lIpXucY
+2CpyfimqrXsfQH3tJF8SKgHI0d6vNOq8hHAVswQis/oatFAvCm9lYPyx1PyfVH/TC26LmQD+eFLI2AYIG/gqu8OralmQ2qkk/VV1xnsUuGAAiHPwof5ajz+LjuPJudkyIk57kY2xkrQWBwITU5wLQ5aS6SuzZqORbCOLiNzWEtjk2tTjakkr
+u+y/ZBxjzkQr1fk540E7KvKDkQuQ9ZF7PqP1rmB6odoxbL7/SD8RuAKDpUuzpSUdlIFEXHwY/MX2f06YYGg3udADh966X0oVA6/A+YoyYa4xnlXv1uRBA9SsJqqWPDABPswVJFBnj12NZlFf1I6/mGMnnQWxTQxtfYPM3N2EHHPr0QmZh9X1
+qUfS7v8xW33WaD4egdASr/s3GbCSyciXrOP1Z32URVF6+bgTaSOEvH6ZgVjIKuoC8K7bbKhcOFrcUsLyRsy7oPyvK7bX0u9QGGTAarcaGRieYspGAjPxkjBYgcnr+TUF/Uq7JVjwyTac8JdrEG0MvKqyUSCAiZJKUlPGt4HsSheLkibSPGFn
+4wsELEGlA0c0dEvmMK0GFV0f4ecXpM95T2nWC1BfyKp7/HE1kxO3Z3SKvLXkR4XWlMzfLP0K0qgWO2CYhbVT4vLMae+J4qzeIWlAOok4d3nS6LQzjSXyMtKvp8LCTqJxs1eZ+N38r/AHmz6R3tllj/bcLqzS99EjNFj1WCsKnPkN358ea7vv
+uRshGCdXnwmqAns7lFaMZe1IjWcoBUljLXlK8zeDYb0mZqZpqJn6kIWbeboTncnMQ7IoMfEA3WY9wn3JQtvfQVEG4lpW6r7TpZh0uz9Myy+yYA2q9pM9Wh45UJu8YjN4czovvuB0EMBI6v16JhgIy3mBUd6YJwJDfZsT2i+63KV563siFmYf
+iqy1TcBgHzllVJek3fpvNlmRwIvfFVW5asYy3WLF2c9dUTQAMQuqpdPDFVHfDr4j8JAtuL1cQ0/wmdvZemnS9eGxqn4InDSseSGlvthXusyYaJVje+/IM304zTYeHeVjHZJJeNrRjDq66vBt32BYu3wKq199/BbC1K1xz7vKefDNeRxOyk/z
+6Y/0NLOBNOwL6bM74HaggVasiefIN9N/boZuDRtk5hCKRC6+8P20jedXuKeFXGwR69Q4l9VqsydcvDm5CWv5PSJcee+4BWwIGN20l0h8PTWhmIUb4W1VAwGHzwc/jYBt3p+GnsQG9UHIDdlLFDyQmn93D/obMFwFoIG4ISwXiQ7rNQ/BllvL
+h3dNrAZJ3ve9tEhvQJjHX1mNJHpgbUD38s0ssD+KPL//kGYUJDMJqXFRLqH4s8eXiOze9gfJJUrY2Uc3g83NcgDaMrkQx6TeWjUOYPuvFNEdertpTfnm+D8FjxnQtYuoYZLCN9wGXrNU4lTHLAeCYVZbtkXzlZcDITcgrjE7QNyCoUt4jmIP
+/15wNDpt51kvMmynixN/+91fKk5hO2JBFYm/r+CalIXEunFQOWV1zoJGiFtM6nqxUYfLuCNF2XYj85djj2UznU3Q+u9X/g+Umf+hw79OJOuqTwdVUkUO4mtGQwfeJshpFfDgWcAE3jBI8WtvVhflXwPim+nG/RLyomH+0qqJr37SuFyr9USs
+4CjRdpOUobTWrdgEjFsnth0cOba0REF4uZyphOSp7hRxfaToCBA/Gl9T4V6lnR4Q7XiNNF/RT1yAhFhgFQ+LXzWcsmrunCJ0q0RBJGx4zWLzdsRRzjMw/OnhlYS/MmX36Q2YxuDw+EqS0KnvTmEulbPewlNVMTWhkzBCcLZIjVb0WREUndDu
+pndoCLMi63DgL93RBpaCDDhDO3TXs6vXfmmC7l0kvYXo/r5rDLGPW8RbqUeuW/iC4vMxofhIlnmNm/PZE/5KiBxGcX3MYAJSQkmpoKeugdU60PkNSiBSiK7UJo7i2tSMoxzwAYGSjNyZ3GjPe6Y2F6pN7jGNuTUgFggZYWgAjzk+OH5gbMpH
+UuVxR98cfuE4DgbIeScEdiKJPF/S+OJIvMjh9+ZvwAWosU+0d2ige6o2wxzT8RVrKOP9meSIlim6h8QnzW59yU+YUEywzv+KIJCywOT6zfyd7uJF+2qUquZKXH9l+WIBl6rA4ul5B/HnA5LQxNZGoZau/lYlOIttjEI0zJ9n08Y0hxolu04N
+zk8k0e2teiKuC/zKjaBrPdjVg7kGal8F02U+bHuiCJI0EjPhr7LtlKp1oz0NYUSt/3WNo2PKdbKy1Ao61tPzEsXRodibWaxtCbUsy6ekhtodSmCeP3FKpcbPSEVHk3htq4dCtvDA1wFmqt7VFWAgG684cK4VYYJiBCphMWgWfqKF6dfajEqU
+IgY0iAgTzUs+30dRC/x/9omTZaAAo1UkFR0l2PStPywlfnFYaRI4HVzQiZrUIzC4O+6tNi6kwnPBlVZOBRqlTSMvxIW4r4VNqQ2bw+9cj/tRWzQmw++/XBfC6TkevlGK2F4gfzOKqfiCOddlheU4/VI/KTb4TBP/j0cAY4LekMtMYOjm0hKI
+w/hnLwX6F0PuIVnzAWsbRv3byRNHzAjuL4zfhItqOFgobNTqdezdDJPX+1S9u1oxXhcMpxS/etAduGAXvayQU+Hkm+2pNanGPejgEIL+1PvkUI/CvTGElUcRCluTJ+yaQwGpk1TbiCXaO2LohtWazybkJy4CwlNJHaIhuMButbuApeMKyr2J
+R8W6RA9uI5DJVFgCxcVBwik84la4dcOUpVKr2SeniKflOV1fTd8XMNkhquiHSLU1d5UrYYYsLMgnv3ffjykENfQWreAMcw/LTGndQcPekpb80WaRH35hZ3RSKt6nk7H2qsZtLTrLWpqkF/Qh3tdb2jYdQEDjUZiebcdpgkgRECg0iOSUzE9t
+cxmKA5R2WzwcxFJ15s6BUQiGsGk712dcn+saxAZfdoWXRV+oOIqK03vkhgT7PGhH8uUR5+keLC8olaqqJo2Im3nw7QrEvv/U2UhO+CnPpABlLMHRzjKC3+Qinf96ccUPCrzrw9TCQdTK5jQg/rh0TYFreD78zHkw47GJqP8BEFoEz/swl0MU
+/JuVWSPPGCmafkx2s2FqA2ugLFu35NFa6z9lUr16+K4Z42Ltzh1/vzuCwiJdkcHMtuP7hva11jFUOKTcfm8E5ENtbjMQzTQL12Pya9AGmtXAMpYkhnZ9tFabT349SQ+9mkWLS0cbmhX2qWE+Gorx4Ng9r1ynAoCB/I4sY9XGFeXYjnX3yy/z
+NRNo3whdtIGwGoeLwvRr8/gmZ2Lp0x+i1ZxVl1cLOLTfSH05Y1zLT9qE6eQbqy1WiYU+lFHNI3OVzI2cNAB/fGMsgtLOQdS/amKaT7ylsrO88UCmz1nx4iyQfFGg1olFl8D7TErLS1RLjCu2JQ1HRx1nxcwPCe643g9JecpcCjeqe1IAX90x
+9Y6LUeEM3IZUqB2jAyrwIxnVK57trFekDcx0TtdsVz4+F6cdwpqKXIDbXHfanUhp/wV1wHw8kTZbcsrXzlpDl7EDX32B0riRh6YlrJAtrxX/t8yx8hae954nqTNDVy47AJuhoNLxvMvdjtbaapPAokmc+uIwy7J0pA9WiN3agzyRbNFH2D7U
+ufSNL3Zo5CQX54LUqEikQSfz1RYN4nfDyUUdqgFpz3MloL9KH1do5rn6BXfzvLfuufnukL3S5FlcrHKRJ3Q76/nz1NXvq/RWHszyrqlIgn9aAA96jUd7A/wgOIxz7XwuQsK1UrwxcNa8DbQjJUz6peQlmMfNEZAO4Of0zyA25MUeKoPRSSDF
+7DboMZCZmcLbQq8dURcLwA1zRMugd9ux7nltdCWi3TvExPsmvIK7a0ik4zGah6Xa+zjT7hIu0nZCC6pwMTdaT2HZnXcMZjHlb9xWq4IyUb0CLqIliHhvkMk/U6TMFcZS82OBagsMUQtSCC9RlFQ1woe4y0lvuxvFTyJaEs9+kpwaniAuzTD9
+FU7OCnVd4e8/dyYo/j6uxBOLeDe6Fy8EIWz9Z37XEIf6dBpjPuoFmQhKaCu5Ob++/MYLFUlaI2v7kUNF0IQdIwUmfLampqw+V/TxjprMsFa9ZEWBm6x75KEKsbDIBrO2Ty0CTHr+cKStpbxPrhPk0+AF7WyZ7HPw1oIpxgLwd1D+IKnf0AKN
+iCWyFVknv0slYj2URhVECQIQwFWLqxIDs5pfOhREOHe4iwKXGM8102cPNeFxtiEyVu79T9FD2Vjqc6UnZH8su0/ng17Kr/L+kHYGU7SUsXjTnIBG4ToFRshf7Du8+/fuXXlIBpws7be5erzXxS19IvZoOCyIoNp9vo6O3Phk/NZOsQin8G3V
+an1SHODW8GQTmogyOx4no/SATizTii6UAkARbaLXWvP+guwJjiDlNIxbuNGBS1hcy/MoGJheYrYHHM60f7/Qfsb3/NKDv8DmnbcwUvFoZBrUDgB3+T2z4kMSIe1H9R/fACskoAfg5H+EOuMyhjZiWeGUBIXSXvBflS/QdK3PVTAL5yTNjAxs
+41Jq1gURd6yhr6UtE9F3OFgUl1xQdRg6oRMBTTMl48f+8IzHZpdmlMVAK6fPvDhrlqlJmKU7lKHakA5AIXpIT+lGM2D7W2wey6Vyk3+udaRI1NvfHQjwPt22vuRyv4PlY0dxaxXMZqJKHb9lT7XQ/56CSBlIwNlx99dmo+dzq2NTjcXoT3r1
+AZBgS/q/mxTNKdjnAFU381aUZH5lv9ANmSlKZWqUoAn8G1fRCBc/c6wwBOw28aD2te8w/VZRMy494bmA37ZAAb3Lb8+q7WQvbjTyPUY09y6U2SlfRRfGdvlUp/hbecZZYdr7i+RUt+WEm10ZALdE1TLuOpdwAnj8+L8p5xbB0VGwZqxAAYkf
+phirPOt2K88ArRrUDiiH4dggK0Xb5CwhqOL5NQKxVFWGsdRNP8SBCB8W6OfLQA+4OztAHMtT6LSu4RhMJxXfMfOOVkSwl9MhsQgoRyDWirIEk7fjwQ6g0fgv/bZc6ndtbYDL+vQjqQWX/NezCFAz+GQsTAqWA9ZMq25SkGAp5VG2Kb4cFOCY
+fPZmDNqi1xeXrG8e2hE/yo8qwM/VdxbbSkfp2irAfr4A9NCYYKKAVYrhEq5Cl2fMrOHhCPHRkMAECTfGc5aCD3fhNCGL62AZhmmDufTINU3g5quO+/rNIg806Gl4+9G+3mwzOqfQGoYSuyLWQsZU6lO2kplnuwYtCqz+igjrOS5zADuaHfWS
+0kdilsRRmiYakAWwaLn8PQXsa3aNei6BSU/xgHxhT16ABTAqUXuBxzqeuBonp+FISY+YPHC4wu7fSwnvo8o+dP7RITL5HD3qC0k6fyYOIhEp3UoxbJSsEXN7AWLyV2PuRjdTA4bYjy4rXwNtYSOsz8wzUt0X3X4w6aJ/HUplnM2bk1EGwLPQ
+a04TN4rIOowBDhkLX8yxSqNnDW675kxmZuLAhRJD+6xHD87Kym+KBa7iSh3xD1IPaGKJR5kZ7fli7V7johejwtBb0qs7XCB9siIR670mOq+d1cHi70XMdMBiu32u0ASwsvwujdjNX86BHfFOguxzLIHthIr1JunnY6e25Yx954irExByQeIW
+13fsKQFhdYiSaLrTljZKthL3nwP0hNO8LA9ksvoDRUZ1U7uj8kz8Kw95+NbRXAGGDMJM7JMHWNgUEw7wzPPRhQUQWMiayCh6453fGaCs7XVVH6ArS23Lev1Urbgc8KOFRg68w0BTczFKfHsRyL+wHNkzglvsRcOfCStHoAi53nVhxyy5CqFh
+KQU+ZY1svxj1QTowQiNGQrEXWv61kWv/7mV36jdL8u1+gf6tRymOC0KfWpVqJIMUZvecAOfb4LfF2YKQVVyUmkjJyh8SbWbuMv6vnCMoJ6qqxI+/V6Qt9WCr6/mobkCZTZ4G4VpvZc6OTCcp3RISIPoV/negeHGrJ6ATSev9RxcB5oZ6ziVc
+15bzUa8UuycXVSVhPQLBHmgxtDfW98W/ZoooaIQOG0kTQsHJ6kXQWnbvDkTrVmky7orXoV9FA/P5tBMWgRaibHLiFGgImA4JaNP011Q/vXm7f8FVPvC4tK4YDJ0a1iDD6HaMFVt/VRJpoe7X9IJVJNufDQZpNDpcf5Ki+9UzWNuvdTJODLbL
+SUG/sEAEmBKPoFms7tjEK+Q/6RoMQkYl10z/KIRcHPRpZIL+4BYpJIR4BmKTqk+yxZ9ixiIM8o0Rq+0VtFtI6rLwNz7M9th1UucDAtS0nay5jgZJ1xQ3u+oFM2OY2CBl0BjesGCyOd29lExo7ofkknIy6zUKrkOTSWCIo1fk3Y/FrCDA3UrI
+l+DwVinaD6KzccAAAAAApRItJDcDT6AABuc8C4sMIkCm2e7HEZ/sCAAAAAARZWg==.
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/cache.db b/devel/example_devel/instructor/cs108/cache.db
new file mode 100644
index 0000000000000000000000000000000000000000..69012bb84480b8150245b15b506b710974b2008e
Binary files /dev/null and b/devel/example_devel/instructor/cs108/cache.db differ
diff --git a/devel/example_devel/instructor/cs108/db.pkl b/devel/example_devel/instructor/cs108/db.pkl
index 4d91d3931e3377cfb0a6de8c39b187956cec0252..d5e7f185526f6f1e94e0a707668bef186d84fad8 100644
Binary files a/devel/example_devel/instructor/cs108/db.pkl and b/devel/example_devel/instructor/cs108/db.pkl differ
diff --git a/devel/example_devel/instructor/cs108/deploy.py b/devel/example_devel/instructor/cs108/deploy.py
index 86535e034b9fb635c961404822a7b6ebda121160..0e6e3ca5b33ca75a747c927b280ff2d2071d6974 100644
--- a/devel/example_devel/instructor/cs108/deploy.py
+++ b/devel/example_devel/instructor/cs108/deploy.py
@@ -1,26 +1,64 @@
+import os
 from cs108.report_devel import Report2, mk_ok
 from unitgrade_private.hidden_create_files import setup_grade_file_report
 from snipper.snip_dir import snip_dir
 
+def run_program():
+    import unittest
+    # import importlib.util
+    import sys
+    sys.path.append(os.path.normpath(os.getcwd() + "/../"))
+    # os.getcwd()
+
+    # spec = importlib.util.spec_from_file_location("cs108.report_devel", "/path/to/file.py")
+    # foo = importlib.util.module_from_spec(spec)
+
+    test = unittest.main(module='cs108.report_devel', exit=False, argv=('', "Numpy.test_bad",))
+
+    # from unittest import loader
+    # testloader = loader.TestLoader()
+    # import cs108.report_devel as m
+    #
+    # module = m
+    #
+    # tests = testloader.loadTestsFromNames(["Numpy.test_good"], module)
+    #
+    # a = 234
+    # self.test = self.testLoader.loadTestsFromNames(self.testNames,
+    #                                                self.module)
+
+
+pass
+
 def main(with_coverage=True):
     mk_ok()
-
     setup_grade_file_report(Report2, with_coverage=with_coverage, minify=False, obfuscate=False,bzip=False)
-
+# import cs108
+# from cs108 import report_devel
 
 if __name__ == "__main__":
-    # import pickle
-    # with open("unitgrade_data/Week1.pkl", 'rb') as f:
-    #     rs = pickle.load(f)
+    # from diskcache import Cache
+    # c = Cache(directory="./unitgrade_data")
+    # c.set("index", "value")
+    # Du kan gemme det som en .json fil...
+
+    # from importlib import reload
+    # import cs108
+    # import sys
     #
-    # r = Report2()
-    # q = r.questions[0][0]()
-    # for k,v in rs.items():
-    #     print(k, v)
-    # from unitgrade_private import load_token
-    # data, txt = load_token("Report2_handin_38_of_38.token")
-
-    # print(data['details'][1]['items'] )
-    # None of that coverage shit.
+    # sys.modules.clear()
+    #
+    # # reload(cs108)
+    # cs108 = reload(cs108)
+    # # from cs108 import homework1
+    # # reload(homework1)
+    # del sys.modules['cs108']
+    # from cs108.homework1 import a
+    # print(a)
+
+    # reload(report_devel)
+
+    # a = 234
+
+    main(with_coverage=True)
     snip_dir("./", "../../students/cs108", clean_destination_dir=True, exclude=['*.token', 'deploy.py'])
-    main(with_coverage=False)
diff --git a/devel/example_devel/instructor/cs108/homework1.py b/devel/example_devel/instructor/cs108/homework1.py
index 8c7c0f9d9d78ed8cd7868e871eab1fa911fa041c..9aa7bccacc73c02e0ab4af0c89beac9a52a1c316 100644
--- a/devel/example_devel/instructor/cs108/homework1.py
+++ b/devel/example_devel/instructor/cs108/homework1.py
@@ -1,6 +1,6 @@
 import numpy as np
 
-
+a = 245
 def reverse_list(mylist): #!f
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
@@ -19,7 +19,7 @@ def add(a,b): #!f
     return a+b
 
 def foo(): #!f
-    """ Comment. """
+    """ Comment.   """
     bar()
 
 def bar(): #!f
diff --git a/devel/example_devel/instructor/cs108/report_devel.py b/devel/example_devel/instructor/cs108/report_devel.py
index a551fc8b0b020c768895f5139828e276840dddcb..fc8a43a6290a84992741a64bbb84f58e796b4da3 100644
--- a/devel/example_devel/instructor/cs108/report_devel.py
+++ b/devel/example_devel/instructor/cs108/report_devel.py
@@ -2,6 +2,7 @@ from unitgrade.framework import Report
 from unitgrade.evaluate import evaluate_report_student
 from cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo
 from unitgrade import UTestCase, cache
+from unitgrade.framework import classmethod_dashboard
 import time
 import numpy as np
 import pickle
@@ -18,28 +19,18 @@ def mk_ok():
         d = {'x1': 1, 'x2': 2}
         pickle.dump(d, f)
 
-
-def formatHeader(fn):
-    from functools import wraps
-    @wraps(fn)
-    def wrapper(*args, **kw):
-        return fn(*args, **kw)
-    return wrapper
-
-
 class Numpy(UTestCase):
     z = 234
 
-    def __getattr__(self, item):
-        print("hi there ", item)
-        return super().__getattr__(item)
-
-    def __getattribute__(self, item):
-        print("oh hello sexy. ", item)
-        return super().__getattribute__(item)
+    # def __getattr__(self, item):
+    #     print("hi there ", item)
+    #     return super().__getattr__(item)
+    #
+    # def __getattribute__(self, item):
+    #     print("oh hello sexy. ", item)
+    #     return super().__getattribute__(item)
 
-    @classmethod
-    # @dash
+    @classmethod_dashboard
     def setUpClass(cls) -> None:
         print("Dum di dai, I am running some setup code here.")
         for i in range(10):
@@ -47,9 +38,12 @@ class Numpy(UTestCase):
         print("Set up.") # must be handled seperately.
         # assert False
 
-    @cache
-    def make_primes(self, n):
-        return primes(n)
+    # @cache
+    # def make_primes(self, n):
+    #     return primes(n)
+
+    # def setUp(self) -> None:
+    #     print("We are doing the setup thing.")
 
     def test_bad(self):
         """
@@ -98,17 +92,23 @@ class Numpy(UTestCase):
         return "THE RESULT OF THE TEST"
 
 
+class AnotherTest(UTestCase):
+    def test_more(self):
+        self.assertEqual(2,2)
+
+    def test_even_more(self):
+        self.assertEqual(2,2)
+
 import cs108
 class Report2(Report):
     title = "CS 101 Report 2"
     questions = [
-        (Numpy, 10),
+        (Numpy, 10), (AnotherTest, 20)
         ]
     pack_imports = [cs108]
 
 if __name__ == "__main__":
     # import texttestrunner
-    import unittest
-    unittest.main()
-
-    # evaluate_report_student(Report2())
+    # import unittest
+    # unittest.main()
+    evaluate_report_student(Report2())
diff --git a/devel/example_devel/instructor/cs108/report_devel_grade.py b/devel/example_devel/instructor/cs108/report_devel_grade.py
index 07ef022dcaa70d6bc25d363a7a98aaa03cad64d8..3237fe6f968311c7323e3ae4a036739d7c1391b2 100644
--- a/devel/example_devel/instructor/cs108/report_devel_grade.py
+++ b/devel/example_devel/instructor/cs108/report_devel_grade.py
@@ -307,22 +307,17 @@ def remove_comments_and_docstrings(source):
         last_lineno = end_line
     return out
 
-import lzma
-import base64
 import textwrap
-import hashlib
 import bz2
 import pickle
 import os
 import zipfile
 import io
 
-
 def bzwrite(json_str, token): # to get around obfuscation issues
     with getattr(bz2, 'open')(token, "wt") as f:
         f.write(json_str)
 
-
 def gather_imports(imp):
     resources = {}
     m = imp
@@ -465,20 +460,6 @@ def gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_
         print(">", token)
 
 
-
-def dict2picklestring(dd):
-    b = lzma.compress(pickle.dumps(dd))
-    b_hash = hashlib.blake2b(b).hexdigest()
-    return base64.b64encode(b).decode("utf-8"), b_hash
-
-def picklestring2dict(picklestr):
-    b = base64.b64decode(picklestr)
-    hash = hashlib.blake2b(b).hexdigest()
-    dictionary = pickle.loads(lzma.decompress(b))
-    return dictionary, hash
-
-
-token_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70
 def save_token(dictionary, plain_text, file_out):
     if plain_text is None:
         plain_text = ""
@@ -494,21 +475,7 @@ def save_token(dictionary, plain_text, file_out):
     with open(file_out, 'w') as f:
         f.write("\n".join(out))
 
-def load_token(file_in):
-    with open(file_in, 'r') as f:
-        s = f.read()
-    splt = s.split(token_sep)
-    data = splt[-1]
-    info = splt[-2]
-    head = token_sep.join(splt[:-2])
-    plain_text=head.strip()
-    hash, l1 = info.split(" ")
-    data = "".join( data.strip()[1:-1].splitlines() )
-    l1 = int(l1)
-    dictionary, b_hash = picklestring2dict(data)
-    assert len(data) == l1
-    assert b_hash == hash.strip()
-    return dictionary, plain_text
+
 
 
 def source_instantiate(name, report1_source, payload):
@@ -521,8 +488,8 @@ def source_instantiate(name, report1_source, payload):
 
 
 
-report1_source = '# from unitgrade import hide\n# from unitgrade import utils\n# import os\n# import lzma\n# import pickle\n\n# DONT\'t import stuff here since install script requires __version__\n\n# def cache_write(object, file_name, verbose=True):\n#     # raise Exception("bad")\n#     # import compress_pickle\n#     dn = os.path.dirname(file_name)\n#     if not os.path.exists(dn):\n#         os.mkdir(dn)\n#     if verbose: print("Writing cache...", file_name)\n#     with lzma.open(file_name, \'wb\', ) as f:\n#         pickle.dump(object, f)\n#     if verbose: print("Done!")\n#\n#\n# def cache_exists(file_name):\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     return os.path.exists(file_name)\n#\n#\n# def cache_read(file_name):\n#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     if os.path.exists(file_name):\n#         try:\n#             with lzma.open(file_name, \'rb\') as f:\n#                 return pickle.load(f)\n#         except Exception as e:\n#             print("Tried to load a bad pickle file at", file_name)\n#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n#             print(e)\n#             # return pickle.load(f)\n#     else:\n#         return None\n\n\n\nimport re\nimport sys\nimport threading\nimport time\nfrom collections import namedtuple\nfrom io import StringIO\nimport numpy as np\nimport tqdm\nfrom colorama import Fore\nfrom functools import _make_key\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\n\ndef gprint(s):\n    print(f"{Fore.LIGHTGREEN_EX}{s}")\n\n\nmyround = lambda x: np.round(x)  # required for obfuscation.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\n\n\nclass Logger(object):\n    def __init__(self, buffer, write_to_stdout=True):\n        
# assert False\n        self.terminal = sys.stdout\n        self.write_to_stdout = write_to_stdout\n        self.log = buffer\n\n    def write(self, message):\n        if self.write_to_stdout:\n            self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\n\nclass Capturing(list):\n    def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n        self._stdout = stdout\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True):  # don\'t put arguments here.\n        self._stdout = sys.stdout if self._stdout == None else self._stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO()  # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n    def __exit__(self, *args):\n        lines = self._stringio.getvalue().splitlines()\n        txt = "\\n".join(lines)\n        numbers = extract_numbers(rm_progress_bar(txt))\n        self.extend(lines)\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n        self.output = txt\n        self.numbers = numbers\n\n\ndef rm_progress_bar(txt):\n    # More robust version. 
Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct + 1)\n            if i > 0 and l.find("|", i + 1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None, mute_stdout=False):\n        if file == None:\n            file = sys.stdout\n        self.file = file\n        self.mute_stdout = mute_stdout\n        self._running = False\n        self.title = title\n        self.dt = 0.025\n        self.n = max(1, int(np.round(t / self.dt)))\n        self.show_progress_bar = show_progress_bar\n        self.pbar = None\n\n        if start:\n            self.start()\n\n    def start(self):\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            self._stdout = sys.stdout\n            sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            print("Stopping a progress bar which is not running (class unitgrade.utils.ActiveProgress")\n            pass\n            # raise Exception("Stopping a stopped progress bar. 
")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        self.file.flush()\n\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            sys.stdout = self._stdout #= sys.stdout\n\n            # sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n        t_ = time.time()\n        for _ in range(self.n - 1):  # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n            tc = time.time()\n            tic = max(0, self.dt - (tc - t_))\n            if tic > 0:\n                time.sleep(tic)\n            t_ = time.time()\n            self.pbar.update(1)\n\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n    if file == None:\n        file = sys.stdout\n    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n    print(first + dot_parts, end="", file=file)\n    last += extra\n    print(last, file=file)\n\n\ndef hide(func):\n    return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply 
foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    return newDecorator\n\n\nhide = makeRegisteringDecorator(hide)\n\n\ndef extract_numbers(txt):\n    numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))\n    return all\n\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n        # print(self._cache.keys())\n        # for k in self._cache:\n        #     print(k)\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n            # This appears to be required since there are two caches. 
Otherwise, when deploy method is run twice,\n            # the cache will not be set correctly.\n            self._cache_put(key, value)\n        return value\n\n    return wrapper\n\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\nimport io\nimport sys\nimport time\nimport unittest\nfrom unittest.runner import _WritelnDecorator\nimport numpy as np\n\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    number = -1  # HAcky way to set question number.\n    show_progress_bar = True\n    unmute = False # Whether to redirect stdout.\n    cc = None\n    setUpClass_time = 3 # Estimated time to run setUpClass in TestCase. Must be set externally. See key (("ClassName", "setUpClass"), "time") in _cache.\n\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # TODO: Fix here. 
probably also needs to flush stdout.\n        self.printErrorList(\'ERROR\', [(test, res[\'stderr\']) for test, res in self.errors])\n        self.printErrorList(\'FAIL\',  [(test, res[\'stderr\']) for test, res in self.failures])\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addError(test, err)\n        err = self.errors[-1][1]\n        if hasattr(sys.stdout, \'log\'):\n            stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        else:\n            stdout = ""\n        self.errors[-1] = (self.errors[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n\n        if not hasattr(self, \'item_title_print\'):\n            # In case setUpClass() fails with an error the short description may not be set. This will fix that problem.\n            self.item_title_print = test.shortDescription()\n            if self.item_title_print is None:  # In case the short description is not set either...\n                self.item_title_print = test.id()\n\n\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        err = self.failures[-1][1]\n        stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        self.failures[-1] = (self.failures[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n        self.cc_terminate(success=False)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        msg = None\n        stdout = sys.stdout.log.readlines() # Only works because we set sys.stdout to a unitgrade.Logger\n\n        if hasattr(test, \'_get_outcome\'):\n            o = test._get_outcome()\n            if 
isinstance(o, dict):\n                key = (test.cache_id(), "return")\n                if key in o:\n                    msg = test._get_outcome()[key]\n\n        # print(sys.stdout.readlines())\n        self.successes.append((test, None))  # (test, message) (to be consistent with failures and errors).\n        self.successes[-1] = (self.successes[-1][0], {\'return\': msg,\n                                 \'stdout\': stdout,\n                                 \'stderr\': None})\n\n        self.cc_terminate()\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            self.cc.file.flush()\n            ss = self.item_title_print\n\n            state = "PASS" if success else "FAILED"\n\n            dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n            if self.show_progress_bar or True:\n                print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n            else:\n                print(dot_parts, end="", file=self.cc.file)\n\n            if tsecs >= 0.5:\n                state += " (" + str(tsecs) + " seconds)"\n            print(state, file=self.cc.file)\n\n    def startTest(self, test):\n        name = test.__class__.__name__\n        if self.testsRun == 0 and hasattr(test.__class__, \'_cache2\'): # Disable this if the class is pure unittest.TestCase\n            # This is the first time we are running a test. i.e. 
we can time the time taken to call setupClass.\n            if test.__class__._cache2 is None:\n                test.__class__._cache2 = {}\n            test.__class__._cache2[((name, \'setUpClass\'), \'time\')] = time.time() - self.t_start\n\n        self.testsRun += 1\n        item_title = test.shortDescription()  # Better for printing (get from cache).\n\n        if item_title == None:\n            # For unittest framework where getDescription may return None.\n            item_title = self.getDescription(test)\n        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n        # if self.show_progress_bar or True:\n        estimated_time = test.__class__._cache.get(((name, test._testMethodName), \'time\'), 100) if hasattr(test.__class__, \'_cache\') else 4\n        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        # else:\n        #     print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n        self._test = test\n        # if not self.unmute:\n        self._stdout = sys.stdout # Redundant. remove later.\n        sys.stdout = Logger(io.StringIO(), write_to_stdout=self.unmute)\n\n    def stopTest(self, test):\n        # if not self.unmute:\n        buff = sys.stdout.log\n        sys.stdout = self._stdout # redundant.\n        buff.close()\n        super().stopTest(test)\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            self.t_start = time.time()\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. 
See unitgrade.framework.py>"\n\n            cc = ActiveProgress(t=self.setUpClass_time, title=q_title_print, show_progress_bar=self.show_progress_bar, mute_stdout=not self.unmute)\n            self.cc = cc\n\n\n    def _restoreStdout(self):  # Used when setting up the test.\n        if self._previousTestClass is None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            if self.show_progress_bar:\n                print(self.cc.title, end="")\n            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        stream = io.StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\nimport importnb\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport inspect\nimport colorama\nimport unittest\nimport time\nimport textwrap\nimport urllib.parse\nimport requests\nimport ast\nimport numpy\n\ncolorama.init(autoreset=True)  # auto resets your settings after every output\nnumpy.seterr(all=\'raise\')\n\ndef setup_dir_by_class(C, base_dir):\n    name = C.__class__.__name__\n    return base_dir, name\n\n# def dash(func):\n#     if isinstance(func, classmethod):\n#         raise Exception("the @dash-decorator was used in the wrong order. 
The right order is: @dash\\n@classmethod\\ndef setUpClass(cls):")\n#\n#     def wrapper(*args, **kwargs):\n#         print("Something is happening before the function is called.")\n#         func(*args, **kwargs)\n#         print("Something is happening after the function is called.")\n#     return wrapper\n\nclass Report:\n    title = "report title"\n    abbreviate_questions = False # Should the test items start with \'Question ...\' or just be q1).\n    version = None # A version number of the report (1.0). Used to compare version numbers with online resources.\n    url = None  # Remote location of this problem.\n\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    _remote_check_cooldown_seconds = 1  # Seconds between remote check of report.\n    nL = 120  # Maximum line width\n    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.\n    _setup_mode = False # True if test is being run in setup-mode, i.e. will not fail because of bad configurations, etc.\n\n    @classmethod\n    def reset(cls):\n        for (q, _) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    @classmethod\n    def mfile(clc):\n        return inspect.getfile(clc)\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._file()), "unitgrade_data/main_config_"+ os.path.basename(self._file()[:-3]) + ".json")\n\n\n    def _is_run_in_grade_mode(self):\n        """ True if this report is being run as part of a grade run. 
"""\n        return self._file().endswith("_grade.py") # Not sure I love this convention.\n\n    def _import_base_relative(self):\n        if hasattr(self.pack_imports[0], \'__path__\'):\n            root_dir = self.pack_imports[0].__path__[0]\n        else:\n            root_dir = self.pack_imports[0].__file__\n\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n        relative_path = relative_path.replace("\\\\", "/")\n\n        return root_dir, relative_path, modules\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        for (q, _) in self.questions:\n            q.nL = self.nL  # Set maximum line length.\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest (nothing fancy).\n        loader = unittest.TestLoader()\n        for q, _ in self.questions:\n            start = time.time()  #\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self, with_coverage=False, verbose=True):\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = True\n                q._report = self\n        for q, _ in self.questions:\n            q._setup_answers_mode = True\n            # q._generate_artifacts = False # Disable artifact generation when the report is being set up.\n\n        evaluate_report_student(self, unmute=verbose, noprogress=not verbose, 
generate_artifacts=False) # Disable artifact generation.\n\n        # self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            # print(self.questions)\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                # print("q is", q())\n                report_cache[q.__qualname__] = q._cache2\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in framework.py\': True}\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = False\n        # report_cache is saved on a per-question basis.\n        # it could also contain additional information such as runtime metadata etc. This may not be appropriate to store with the invidivual questions(?).\n        # In this case, the function should be re-defined.\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n        self._config = payloads[\'config\']\n\n    def _check_remote_versions(self):\n        if self.url is None:\n            return\n        url = self.url\n        if not url.endswith("/"):\n            url += "/"\n        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"\n        # print("Sanity checking time using snapshot", snapshot_file)\n        # print("and using self-identified file", self._file())\n\n        if os.path.isfile(snapshot_file):\n            with open(snapshot_file, \'r\') as f:\n                t = f.read()\n                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:\n                    return\n        # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n\n        if self.url.startswith("https://gitlab"):\n            # Try to turn url into a \'raw\' format.\n            # 
"https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            # url = self.url\n            url = url.replace("-/tree", "-/raw")\n            # print(url)\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/tree/master/examples/autolab_example_py_upload/instructor/cs102_autolab"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/report2_test.py?inline=false"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")\n            # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n            if self._is_run_in_grade_mode():\n                remote_source = requests.get(raw_url).text\n                with open(self._file(), \'r\') as f:\n                    local_source = f.read()\n                if local_source != remote_source:\n                    print("\\nThe local version of this report is not identical to the remote version which can be found at")\n                    print(self.url)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of grade script does not match the remote version. 
Please update using git pull")\n                #\n                # # node = ast.parse(text)\n                # # classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                #\n                # # for b in classes.body:\n                # #     print(b.)\n                #     # if b.targets[0].id == "version":\n                #         # print(b)\n                #         # print(b.value)\n                #         version_remote = b.value.value\n                #         break\n                # if version_remote != self.version:\n            else:\n                text = requests.get(raw_url).text\n                node = ast.parse(text)\n                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                for b in classes.body:\n                    # print(b.)\n                    if b.targets[0].id == "version":\n                        # print(b)\n                        # print(b.value)\n                        version_remote = b.value.value\n                        break\n                if version_remote != self.version:\n                    print("\\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of test on remote is {version_remote}, which is different than this version of the test {self.version}. 
Please update your test to the most recent version.")\n\n                for (q,_) in self.questions:\n                    qq = q(skip_remote_check=True)\n                    cfile = qq._cache_file()\n\n                    relpath = os.path.relpath(cfile, os.path.dirname(self._file()))\n                    relpath = relpath.replace("\\\\", "/")\n                    raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")\n                    # requests.get(raw_url)\n\n                    with open(cfile, \'rb\') as f:\n                        b1 = f.read()\n\n                    b2 = requests.get(raw_url).content\n                    if b1 != b2:\n                        print("\\nQuestion ", qq.title, "relies on the data file", cfile)\n                        print("However, it appears that this file is missing or in a different version than the most recent found here:")\n                        print(self.url)\n                        print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                        print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                        print("This can be done by simply running the command")\n                        print("> git pull")\n                        print("to avoid running bad tests against good code, the program will now stop. Please update and good luck!")\n                        raise Exception("The data file for the question", qq.title, "did not match remote source found on git. The test will therefore automatically fail. 
Please update your test/data files.")\n\n                t = time.time()\n                if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):\n                    with open(snapshot_file, \'w\') as f:\n                        f.write(f"{t}")\n\ndef get_hints(ss):\n    """ Extract all blocks of the forms:\n\n    Hints:\n    bla-bla.\n\n    and returns the content unaltered.\n    """\n    if ss == None:\n        return None\n    try:\n        ss = textwrap.dedent(ss)\n        ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n        hints = ["hints:", "hint:"]\n        indexes = [ss.lower().find(h) for h in hints]\n        j = np.argmax(indexes)\n        if indexes[j] == -1:\n            return None\n        h = hints[j]\n        ss = ss[ss.lower().find(h) + len(h) + 1:]\n        ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n        ss = textwrap.dedent(ss).strip()\n        # if ss.startswith(\'*\'):\n        #     ss = ss[1:].strip()\n        return ss\n    except Exception as e:\n        print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n    # a = 234\n    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache. Ensures method always produce same result.\n    _cache2 = None  # User-written cache.\n    _with_coverage = False\n    _covcache = None # Coverage cache. Written to if _with_coverage is true.\n    _report = None  # The report used. This is very, very hacky and should always be None. Don\'t rely on it!\n    _run_in_report_mode = True\n\n    _generate_artifacts = True # Whether the file will generate the artifact .json files. This is used in the _grade-script mode.\n    # If true, the tests will not fail when cache is used. 
This is necesary since otherwise the cache will not be updated\n    # during setup, and the deploy script must be run many times.\n    _setup_answers_mode = False\n\n    def capture(self):\n        if hasattr(self, \'_stdout\') and self._stdout is not None:\n            file = self._stdout\n        else:\n            file = sys.stdout\n        return Capturing2(stdout=file)\n\n    # def __call__(self, *args, **kwargs):\n    #     a = \'234\'\n    #     pass\n\n\n    @classmethod\n    def question_title(cls):\n        """ Return the question title """\n        if cls.__doc__ is not None:\n            title = cls.__doc__.strip().splitlines()[0].strip()\n            if not (title.startswith("Hints:") or title.startswith("Hint:") ):\n                return title\n        return cls.__qualname__\n\n    def run(self, result):\n        if not self._generate_artifacts:\n            return super().run(result)\n        from unittest.case import TestCase\n        from pupdb.core import PupDB\n\n        db = PupDB(self._artifact_file())\n        db.set(\'run_id\', np.random.randint(1000*1000))\n        db.set("state", "running")\n        db.set(\'coverage_files_changed\', None)\n\n        # print("Re-running test")\n        _stdout = sys.stdout\n        _stderr = sys.stderr\n\n        std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n        # stderr_capture = StdCapturing(sys.stderr, db=db)\n        # std_err_capture = StdCapturing(sys.stderr, "stderr", db=db)\n\n        try:\n            # Run this unittest and record all of the output.\n            # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n            # sys.stdout = stdout_capture\n            sys.stderr = std_capture.dummy_stderr\n            sys.stdout = std_capture.dummy_stdout\n\n            result_ = TestCase.run(self, result)\n\n            from werkzeug.debug.tbtools import DebugTraceback, 
_process_traceback\n            # print(result_._excinfo[0])\n            actual_errors = []\n            for test, err in self._error_fed_during_run:\n                if err is None:\n                    continue\n                else:\n                    import traceback\n                    # traceback.print_tb(err[2])\n                    actual_errors.append(err)\n\n            if len(actual_errors) > 0:\n                ex, exi, tb = actual_errors[0]\n                exi.__traceback__ = tb\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                db.set(\'state\', \'fail\')\n            else:\n                db.set(\'state\', \'pass\')\n        except Exception as e:\n            print("-----------------.///////////////////////////////////////////////////////////////")\n            # print(e)\n            import traceback\n            traceback.print_exc()\n            raise e\n        finally:\n            sys.stdout = _stdout\n            sys.stderr = _stderr\n            std_capture.close()\n        return result_\n\n    @classmethod\n    def before_setup_called(cls):\n        print("hi")\n        # print("I am called before the fucking class is fucking made. 
setUpClass has been broken!")\n        pass\n\n    _setUpClass_not_overwritten = False\n    @classmethod\n    def setUpClass(cls) -> None:\n        cls._setUpClass_not_overwritten = True\n\n    @classmethod\n    def __new__(cls, *args, **kwargs):\n        old_setup = cls.setUpClass\n        def new_setup():\n            raise Exception("Bad")\n            cls.before_setup_called()\n            if cls.setUpClass == UTestCase.setUpClass:\n                print("Setup class not overwritten")\n            else:\n                print("Setup class is overwritten")\n\n            try:\n                old_setup()\n            except Exception as e:\n                raise e\n            finally:\n                pass\n\n        # cls.setUpClass = new_setup\n        ci = super().__new__(cls)\n        ci.setUpClass = new_setup\n        return ci\n\n    # def inheritors(klass):\n    # import new\n    # z.q = new.instancemethod(method, z, None)\n\n    # def __getattr__(self, item):\n    #     # print("hi there ", item)\n    #     return super().__getattr__(item)\n    #\n    # def __getattribute__(self, item):\n    #     # print("oh hello sexy. 
", item)\n    #     return super().__getattribute__(item)\n\n\n\n    def _callSetUp(self):\n        if self._with_coverage:\n            if self._covcache is None:\n                self._covcache = {}\n            import coverage\n            self.cov = coverage.Coverage(data_file=None)\n            self.cov.start()\n        self.setUp()\n\n    def _callTearDown(self):\n        self.tearDown()\n        # print("Teardown.")\n        if self._with_coverage:\n            # print("with cov")\n            from pathlib import Path\n            from snipper import snipper_main\n            try:\n                self.cov.stop()\n            except Exception as e:\n                print("Something went wrong while tearing down coverage test")\n                print(e)\n            data = self.cov.get_data()\n            base, _, _ = self._report._import_base_relative()\n            for file in data.measured_files():\n                file = os.path.normpath(file)\n                root = Path(base)\n                child = Path(file)\n                if root in child.parents:\n                    # print("Reading file", child)\n                    with open(child, \'r\') as f:\n                        s = f.read()\n                    lines = s.splitlines()\n                    garb = \'GARBAGE\'\n                    lines2 = snipper_main.censor_code(lines, keep=True)\n                    # print("\\n".join(lines2))\n                    if len(lines) != len(lines2):\n                        for k in range(len(lines)):\n                            print(k, ">", lines[k], "::::::::", lines2[k])\n                        print("Snipper failure; line lenghts do not agree. 
Exiting..")\n                        print(child, "len(lines) == len(lines2)", len(lines), len(lines2))\n                        import sys\n                        sys.exit()\n\n                    assert len(lines) == len(lines2)\n                    for ll in data.contexts_by_lineno(file):\n                        l = ll-1\n                        if l < len(lines2) and lines2[l].strip() == garb:\n                            # print("Got a hit at l", l)\n                            rel = os.path.relpath(child, root)\n                            cc = self._covcache\n                            j = 0\n                            for j in range(l, -1, -1):\n                                if "def" in lines2[j] or "class" in lines2[j]:\n                                    break\n                            from snipper.legacy import gcoms\n\n                            fun = lines2[j]\n                            comments, _ = gcoms("\\n".join(lines2[j:l]))\n                            if rel not in cc:\n                                cc[rel] = {}\n                            cc[rel][fun] = (l, "\\n".join(comments))\n                            # print("found", rel, fun)\n                            self._cache_put((self.cache_id(), \'coverage\'), self._covcache)\n\n    def shortDescriptionStandard(self):\n        sd = super().shortDescription()\n        if sd is None or sd.strip().startswith("Hints:") or sd.strip().startswith("Hint:"):\n            sd = self._testMethodName\n        return sd\n\n    def shortDescription(self):\n        sd = self.shortDescriptionStandard()\n        title = self._cache_get((self.cache_id(), \'title\'), sd)\n        return title if title is not None else sd\n\n    @property\n    def title(self):\n        return self.shortDescription()\n\n    @title.setter\n    def title(self, value):\n        self._cache_put((self.cache_id(), \'title\'), value)\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or 
self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        self._ensure_cache_exists()  # Make sure cache is there.\n        if self._testMethodDoc is not None:\n            self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n        self._cache2[(self.cache_id(), \'assert\')] = {}\n        res = testMethod()\n        elapsed = time.time() - t\n        self._get_outcome()[ (self.cache_id(), "return") ] = res\n        self._cache_put((self.cache_id(), "time"), elapsed)\n\n\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return c, m\n\n    def __init__(self, *args, skip_remote_check=False, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self._assert_cache_index = 0\n        # Perhaps do a sanity check here to see if the cache is up to date? 
To do that, we must make sure the\n        # cache exists locally.\n        # Find the report class this class is defined within.\n        if skip_remote_check:\n            return\n        import importlib, inspect\n        found_reports = []\n        # print("But do I have report", self._report)\n        # print("I think I am module", self.__module__)\n        # print("Importlib says", importlib.import_module(self.__module__))\n        # This will delegate you to the wrong main clsas when running in grade mode.\n        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):\n            # print("checking", cls)\n            if issubclass(cls, Report):\n                for q,_ in cls.questions:\n                    if q == self.__class__:\n                        found_reports.append(cls)\n        if len(found_reports) == 0:\n            pass # This case occurs when the report _grade script is being run.\n            # raise Exception("This question is not a member of a report. Very, very odd.")\n        if len(found_reports) > 1:\n            raise Exception("This question is a member of multiple reports. 
That should not be the case -- don\'t get too creative.")\n        if len(found_reports) > 0:\n            report = found_reports[0]\n            report()._check_remote_versions()\n\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def get_expected_test_value(self):\n        key = (self.cache_id(), \'assert\')\n        id = self._assert_cache_index\n        cache = self._cache_get(key)\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        return _expected\n\n    def wrap_assert(self, assert_fun, first, *args, **kwargs):\n        key = (self.cache_id(), \'assert\')\n        if not self._cache_contains(key):\n            print("Warning, framework missing", key)\n            self.__class__._cache[key] = {}  # A new dict. We manually insert it because we have to use that the dict is mutable.\n        cache = self._cache_get(key)\n        id = self._assert_cache_index\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        if not id in cache:\n            print("Warning, framework missing cache index", key, "id =", id, " - The test will be skipped for now.")\n            if self._setup_answers_mode:\n                _expected = first # Bypass by setting equal to first. 
This is in case multiple self.assertEqualC\'s are run in a row and have to be set.\n\n        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n        cache[id] = first\n        self._cache_put(key, cache)\n        self._assert_cache_index += 1\n        if not self._setup_answers_mode:\n            assert_fun(first, _expected, *args, **kwargs)\n        else:\n            try:\n                assert_fun(first, _expected, *args, **kwargs)\n            except Exception as e:\n                print("Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.", id)\n                print("> first", first)\n                print("> expected", _expected)\n                print(e)\n\n\n    def assertEqualC(self, first, msg=None):\n        self.wrap_assert(self.assertEqual, first, msg)\n\n    def _shape_equal(self, first, second):\n        a1 = np.asarray(first).squeeze()\n        a2 = np.asarray(second).squeeze()\n        msg = None\n        msg = "" if msg is None else msg\n        if len(msg) > 0:\n            msg += "\\n"\n        self.assertEqual(a1.shape, a2.shape, msg=msg + "Dimensions of input data does not agree.")\n        assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.\n        a1[np.isinf(a1)] = 0\n        a2[np.isinf(a2)] = 0\n        diff = np.abs(a1 - a2)\n        return diff\n\n    def assertLinf(self, first, second=None, tol=1e-5, msg=None):\n        """ Test in the L_infinity norm.\n        :param first:\n        :param second:\n        :param tol:\n        :param msg:\n        :return:\n        """\n        if second is None:\n            return self.wrap_assert(self.assertLinf, first, tol=tol, msg=msg)\n        else:\n            diff = self._shape_equal(first, second)\n            np.testing.assert_allclose(first, second, atol=tol)\n            \n            max_diff = max(diff.flat)\n            if 
max_diff >= tol:\n                from unittest.util import safe_repr\n                # msg = f\'{safe_repr(first)} != {safe_repr(second)} : Not equal within tolerance {tol}\'\n                # print(msg)\n                # np.testing.assert_almost_equal\n                # import numpy as np\n                print(f"|first - second|_max = {max_diff} > {tol} ")\n                np.testing.assert_almost_equal(first, second)\n                # If the above fail, make sure to throw an error:\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=f\'Not equal within tolerance {tol}\')\n\n    def assertL2(self, first, second=None, tol=1e-5, msg=None, relative=False):\n        if second is None:\n            return self.wrap_assert(self.assertL2, first, tol=tol, msg=msg, relative=relative)\n        else:\n            # We first test using numpys build-in testing method to see if one coordinate deviates a great deal.\n            # This gives us better output, and we know that the coordinate wise difference is lower than the norm difference.\n            if not relative:\n                np.testing.assert_allclose(first, second, atol=tol)\n            diff = self._shape_equal(first, second)\n            diff = ( ( np.asarray( diff.flatten() )**2).sum() )**.5\n\n            scale = (2/(np.linalg.norm(np.asarray(first).flat) + np.linalg.norm(np.asarray(second).flat)) ) if relative else 1\n            max_diff = diff*scale\n            if max_diff >= tol:\n                msg = "" if msg is None else msg\n                print(f"|first - second|_2 = {max_diff} > {tol} ")\n                # Deletage to numpy. 
Let numpy make nicer messages.\n                np.testing.assert_almost_equal(first, second) # This function does not take a msg parameter.\n                # Make sure to throw an error no matter what.\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=msg + f"Not equal within tolerance {tol}")\n\n    def _cache_file(self):\n        return os.path.dirname(inspect.getabsfile(type(self))) + "/unitgrade_data/" + self.__class__.__name__ + ".pkl"\n\n    @classmethod\n    def _artifact_file_for_setUpClass(cls):\n        cf = os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__\n        return os.path.join(os.path.dirname(self._cache_file()), "-setUpClass.json")\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._cache_file()), \'-\'.join(self.cache_id()) + ".json")\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None:  # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. 
What is going on?!")\n        cfile = self._cache_file()\n        if os.path.exists(cfile):\n            try:\n                with open(cfile, \'rb\') as f:\n                    data = pickle.load(f)\n                self.__class__._cache = data\n            except Exception as e:\n                print("Cache file did not exist:", cfile)\n                print(e)\n        else:\n            print("Warning! data file not found", cfile)\n\n    def _get_coverage_files(self):\n        key = (self.cache_id(), \'coverage\')\n        # CC = None\n        # if self._cache_contains(key):\n        return self._cache_get(key, None)\n        # return CC\n\n    def _get_hints(self):\n        """\n            This code is run when the test is set up to generate the hints and store them in an artifact file. It may be beneficial to simple compute them beforehand\n            and store them in the local unitgrade pickle file. This code is therefore expected to superceede the alterative code later.\n        """\n        hints = []\n        # print("Getting hint")\n        key = (self.cache_id(), \'coverage\')\n        if self._cache_contains(key):\n            CC = self._cache_get(key)\n            # cl, m = self.cache_id()\n            # print("Getting hint using", CC)\n            # Insert newline to get better formatting.\n            # gprint(\n            #     f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n            for file in CC:\n                rec = CC[file]\n                # gprint(f">   * {file}")\n                for l in rec:\n                    _, comments = CC[file][l]\n                    hint = get_hints(comments)\n\n                    if hint != None:\n                        hints.append((hint, file, l))\n\n        doc = self._testMethodDoc\n        # print("doc", doc)\n        if doc is not None:\n            hint = get_hints(self._testMethodDoc)\n            if hint is not None:\n                hints = [(hint, None, self.cache_id()[1])] + hints\n\n        return hints\n\n    def _feedErrorsToResult(self, result, errors):\n        """ Use this to show hints on test failure.\n        It feeds error to the result -- so if there are errors, they will crop up here\n        """\n        self._error_fed_during_run = errors.copy() # import to copy the error list.\n\n        # result._test._error_fed_during_run = errors.copy()\n\n        if not isinstance(result, UTextResult):\n            er = [e for e, v in errors if v != None]\n            # print("Errors are", errors)\n            if len(er) > 0:\n                hints = []\n                key = (self.cache_id(), \'coverage\')\n                if self._cache_contains(key):\n                    CC = self._cache_get(key)\n                    cl, m = self.cache_id()\n                    # Insert newline to get better formatting.\n                    gprint(f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n                    for file in CC:\n                        rec = CC[file]\n                        gprint(f">   * {file}")\n                        for l in rec:\n                            _, comments = CC[file][l]\n                            hint = get_hints(comments)\n\n                            if hint != None:\n                                hints.append((hint, file, l) )\n                            gprint(f">      - {l}")\n\n                er = er[0]\n\n                doc = er._testMethodDoc\n                # print("doc", doc)\n                if doc is not None:\n                    hint = get_hints(er._testMethodDoc)\n                    if hint is not None:\n                        hints = [(hint, None, self.cache_id()[1] )] + hints\n                if len(hints) > 0:\n                    # print(hints)\n                    for hint, file, method in hints:\n                        s = (f"\'{method.strip()}\'" if method is not None else "")\n                        if method is not None and file is not None:\n                            s += " in "\n                        try:\n                            s += (file.strip() if file is not None else "")\n                            gprint(">")\n                            gprint("> Hints (from " + s + ")")\n                            gprint(textwrap.indent(hint, ">   "))\n                        except Exception as e:\n                            print("Bad stuff in hints. 
")\n                            print(hints)\n        # result._last_errors = errors\n        super()._feedErrorsToResult(result, errors)\n        b = 234\n\n    def startTestRun(self):\n        super().startTestRun()\n\n# subclasses = set()\n# work = [UTestCase]\n# while work:\n#     parent = work.pop()\n#     for child in parent.__subclasses__():\n#         if child not in subclasses:\n#             subclasses.add(child)\n#             work.append(child)\n# return subclasses\n# import builtins\n# ga = builtins.getattr\n# def my_funky_getatt(a,b,c=None):\n#     print("ga", a, b, c)\n#     return ga(a,b,c)\n# builtins.getattr = my_funky_getatt\n\nclass Required:\n    pass\n\nclass ParticipationTest(UTestCase,Required):\n    max_group_size = None\n    students_in_group = None\n    workload_assignment = {\'Question 1\': [1, 0, 0]}\n\n    def test_students(self):\n        pass\n\n    def test_workload(self):\n        pass\n\n# 817, 705\nclass NotebookTestCase(UTestCase):\n    notebook = None\n    _nb = None\n    @classmethod\n    def setUpClass(cls) -> None:\n        with Capturing():\n            cls._nb = importnb.Notebook.load(cls.notebook)\n\n    @property\n    def nb(self):\n        return self.__class__._nb\n\n# import __builtin__\n# all subclasses which are known at this point.\n# def get_all_subclasses(cls):\n#     all_subclasses = []\n#\n#     for subclass in cls.__subclasses__():\n#         all_subclasses.append(subclass)\n#         all_subclasses.extend(get_all_subclasses(subclass))\n#\n#     return all_subclasses\n#\n# a = 234\n\nimport hashlib\nimport io\nimport tokenize\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python 
assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,\n                            show_tol_err=False, show_privisional=True, noprogress=None,\n                            generate_artifacts=True):\n    args = parser.parse_args()\n    if noprogress is None:\n        noprogress = args.noprogress\n\n    if question is None and args.q is not None:\n        question = args.q\n        if "." 
in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,\n                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err,\n                                          generate_artifacts=generate_artifacts)\n\n\n    if question is None and show_privisional:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        # testcase_methods = list(testCaseClass.__dict__.keys())\n        ls = []\n        for C in testCaseClass.mro():\n            if issubclass(C, unittest.TestCase):\n                ls = list(C.__dict__.keys()) + ls\n        testcase_methods = ls\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    generate_artifacts=True, # Generate the artifact .json files. 
These are exclusively used by the dashboard.\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n    # print("Started: " + dt_string)\n    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += f" version {report.version}"\n    print(s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n        if not report.abbreviate_questions:\n            q_title_print = "Question %i: %s"%(n+1, qtitle)\n        else:\n            q_title_print = "q%i) %s" % (n + 1, qtitle)\n\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        # q_ = {} # Gather score in this class.\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n        UTextResult.number = n\n        UTextResult.nL = report.nL\n        UTextResult.unmute = unmute # Hacky as well.\n        UTextResult.setUpClass_time = q._cache.get(((q.__name__, 
\'setUpClass\'), \'time\'), 3) if hasattr(q, \'_cache\') and q._cache is not None else 3\n\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        details = {}\n        for s, msg in res.successes + res.failures + res.errors:\n            # from unittest.suite import _ErrorHolder\n            # from unittest import _Err\n            # if isinstance(s, _ErrorHolder)\n            if hasattr(s, \'_testMethodName\'):\n                key = (q.__name__, s._testMethodName)\n            else:\n                # In case s is an _ErrorHolder (unittest.suite)\n                key = (q.__name__, s.id())\n            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.\n\n            detail = {}\n            if (s,msg) in res.successes:\n                detail[\'status\'] = "pass"\n            elif (s,msg) in res.failures:\n                detail[\'status\'] = \'fail\'\n            elif (s,msg) in res.errors:\n                detail[\'status\'] = \'error\'\n            else:\n                raise Exception("Status not known.")\n\n            nice_title = s.title\n            detail = {**detail, **msg, \'nice_title\': nice_title}#[\'message\'] = msg\n            details[key] = detail\n\n        # q_[s._testMethodName] = ("pass", None)\n        # for (s,msg) in res.failures:\n        #     q_[s._testMethodName] = ("fail", msg)\n        # for (s,msg) in res.errors:\n        #     q_[s._testMethodName] = ("error", msg)\n        # res.successes[0]._get_outcome()\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': details, \'title\': qtitle, \'name\': q.__name__,\n                   }\n        q.obtained = 
obtained\n        q.possible = possible\n        # print(q._cache)\n        # print(q._covcache)\n        s1 = f" * q{n+1})   Total"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",\n           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\ndef python_code_str_id(python_code, strip_comments_and_docstring=True):\n    s = python_code\n\n    if strip_comments_and_docstring:\n        try:\n            s = remove_comments_and_docstrings(s)\n        except Exception as e:\n            print("--"*10)\n            print(python_code)\n            print(e)\n\n    s = "".join([c.strip() for c in s.split()])\n    hash_object = hashlib.blake2b(s.encode())\n    return hash_object.hexdigest()\n\n\ndef file_id(file, strip_comments_and_docstring=True):\n    with open(file, \'r\') as f:\n        # s = f.read()\n        return python_code_str_id(f.read())\n\n\ndef remove_comments_and_docstrings(source):\n    """\n    Returns \'source\' minus comments and docstrings.\n    """\n    io_obj = io.StringIO(source)\n    out = ""\n    prev_toktype = tokenize.INDENT\n    last_lineno = -1\n    last_col = 0\n    for tok in tokenize.generate_tokens(io_obj.readline):\n        token_type = tok[0]\n        token_string = tok[1]\n        start_line, start_col = tok[2]\n        end_line, end_col = tok[3]\n        ltext = tok[4]\n        # The following two conditionals preserve indentation.\n        # This is necessary because we\'re not using tokenize.untokenize()\n        # (because it spits out code with copious amounts of oddly-placed\n        # whitespace).\n        if start_line > last_lineno:\n            last_col = 0\n        if start_col > last_col:\n            out += (" " * (start_col - last_col))\n        # Remove comments:\n        if token_type == tokenize.COMMENT:\n            pass\n        # This series of conditionals removes docstrings:\n        elif token_type == tokenize.STRING:\n            if prev_toktype != tokenize.INDENT:\n        # This is likely a docstring; double-check we\'re not inside an operator:\n           
     if prev_toktype != tokenize.NEWLINE:\n                    # Note regarding NEWLINE vs NL: The tokenize module\n                    # differentiates between newlines that start a new statement\n                    # and newlines inside of operators such as parens, brackes,\n                    # and curly braces.  Newlines inside of operators are\n                    # NEWLINE and newlines that start new code are NL.\n                    # Catch whole-module docstrings:\n                    if start_col > 0:\n                        # Unlabelled indentation means we\'re inside an operator\n                        out += token_string\n                    # Note regarding the INDENT token: The tokenize module does\n                    # not label indentation inside of an operator (parens,\n                    # brackets, and curly braces) as actual indentation.\n                    # For example:\n                    # def foo():\n                    #     "The spaces before this docstring are tokenize.INDENT"\n                    #     test = [\n                    #         "The spaces before this string do not get a token"\n                    #     ]\n        else:\n            out += token_string\n        prev_toktype = token_type\n        last_col = end_col\n        last_lineno = end_line\n    return out\n\nimport lzma\nimport base64\nimport textwrap\nimport hashlib\nimport bz2\nimport pickle\nimport os\nimport zipfile\nimport io\n\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    f = m.__file__\n    if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'):\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        im = __import__(m.__name__.split(\'.\')[0])\n        if isinstance(im, list):\n            print("im is a list")\n            print(im)\n 
       # the __path__ attribute *may* be a string in some cases. I had to fix this.\n        print("path.:",  __import__(m.__name__.split(\'.\')[0]).__path__)\n        # top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__[0]\n        module_import = False\n\n    found_hashes = {}\n    # pycode = {}\n    resources[\'pycode\'] = {}\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)\n                    zip.write(fpath, v)\n                    if not fpath.endswith("_grade.py"): # Exclude grade files.\n                        with open(fpath, \'r\') as f:\n                            s = f.read()\n                        found_hashes[v] = python_code_str_id(s)\n                        resources[\'pycode\'][v] = s\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    resources[\'blake2b_file_hashes\'] = found_hashes\n    return resources, top_package\n\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_report_source_include(report):\n    sources = {}\n    # print("")\n    # if not args.autolab:\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            _, report_relative_location, module_import = report._import_base_relative()\n\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'report_module_specification\'] = module_import\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            print(f" * {m.__name__}")\n    return sources\n\ndef gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):\n    # n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n       
                                   show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab,\n                                          generate_artifacts=False,\n                                          )\n    print("")\n    sources = {}\n    if not args.autolab:\n        results[\'sources\'] = sources = gather_report_source_include(report)\n\n    token_plain = """\n# This file contains your results. Do not edit its content. Simply upload it as it is. """\n\n    s_include = [token_plain]\n    known_hashes = []\n    cov_files = []\n    use_coverage = True\n    if report._config is not None:\n        known_hashes = report._config[\'blake2b_file_hashes\']\n        for Q, _ in report.questions:\n            use_coverage = use_coverage and isinstance(Q, UTestCase)\n            for key in Q._cache:\n                if len(key) >= 2 and key[1] == "coverage":\n                    for f in Q._cache[key]:\n                        cov_files.append(f)\n\n    for s in sources.values():\n        for f_rel, hash in s[\'blake2b_file_hashes\'].items():\n            if hash in known_hashes and f_rel not in cov_files and use_coverage:\n                print("Skipping", f_rel)\n            else:\n                if token_include_plaintext_source:\n                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)\n                    s_include.append("")\n                    s_include.append(s[\'pycode\'][f_rel])\n                    s_include.append("")\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = f"_v{report.version}" if report.version is not None else ""\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.normpath(os.path.join(output_dir, token))\n\n    save_token(results, "\\n".join(s_include), token)\n\n    if not 
args.autolab:\n        print("> Testing token file integrity...", sep="")\n        load_token(token)\n        print("Done!")\n        print(" ")\n        print("To get credit for your results, please upload the single unmodified file: ")\n        print(">", token)\n\n\n\ndef dict2picklestring(dd):\n    b = lzma.compress(pickle.dumps(dd))\n    b_hash = hashlib.blake2b(b).hexdigest()\n    return base64.b64encode(b).decode("utf-8"), b_hash\n\ndef picklestring2dict(picklestr):\n    b = base64.b64decode(picklestr)\n    hash = hashlib.blake2b(b).hexdigest()\n    dictionary = pickle.loads(lzma.decompress(b))\n    return dictionary, hash\n\n\ntoken_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70\ndef save_token(dictionary, plain_text, file_out):\n    if plain_text is None:\n        plain_text = ""\n    if len(plain_text) == 0:\n        plain_text = "Start token file"\n    plain_text = plain_text.strip()\n    b, b_hash = dict2picklestring(dictionary)\n    b_l1 = len(b)\n    b = "."+b+"."\n    b = "\\n".join( textwrap.wrap(b, 180))\n\n    out = [plain_text, token_sep, f"{b_hash} {b_l1}", token_sep, b]\n    with open(file_out, \'w\') as f:\n        f.write("\\n".join(out))\n\ndef load_token(file_in):\n    with open(file_in, \'r\') as f:\n        s = f.read()\n    splt = s.split(token_sep)\n    data = splt[-1]\n    info = splt[-2]\n    head = token_sep.join(splt[:-2])\n    plain_text=head.strip()\n    hash, l1 = info.split(" ")\n    data = "".join( data.strip()[1:-1].splitlines() )\n    l1 = int(l1)\n    dictionary, b_hash = picklestring2dict(data)\n    assert len(data) == l1\n    assert b_hash == hash.strip()\n    return dictionary, plain_text\n\n\ndef source_instantiate(name, report1_source, payload):\n    # print("Executing sources", report1_source)\n    eval("exec")(report1_source, globals())\n    # print("Loaind gpayload..")\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    return report\n\n\n__version__ = "0.1.28.1"\n\nfrom 
cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo\nimport time\nimport numpy as np\nimport pickle\nimport os\n# from unitgrade.framework import dash\n\ndef mk_bad():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 100, \'x2\': 300}\n        pickle.dump(d, f)\n\ndef mk_ok():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 1, \'x2\': 2}\n        pickle.dump(d, f)\n\n\ndef formatHeader(fn):\n    from functools import wraps\n    @wraps(fn)\n    def wrapper(*args, **kw):\n        return fn(*args, **kw)\n    return wrapper\n\n\nclass Numpy(UTestCase):\n    z = 234\n\n    def __getattr__(self, item):\n        print("hi there ", item)\n        return super().__getattr__(item)\n\n    def __getattribute__(self, item):\n        print("oh hello sexy. ", item)\n        return super().__getattribute__(item)\n\n    @classmethod\n    # @dash\n    def setUpClass(cls) -> None:\n        print("Dum di dai, I am running some setup code here.")\n        for i in range(10):\n            print("Hello world", i)\n        print("Set up.") # must be handled seperately.\n        # assert False\n\n    def test_bad(self):\n        """\n        Hints:\n            * Remember to properly de-indent your code.\n            * Do more stuff which works.\n        """\n        # raise Exception("This ended poorly")\n        # print("Here we go")\n        # return\n        # self.assertEqual(1, 1)\n        with open(os.path.dirname(__file__)+"/db.pkl", \'rb\') as f:\n            d = pickle.load(f)\n        # print(d)\n        # assert False\n        # for i in range(10):\n        from tqdm import tqdm\n        for i in tqdm(range(100)):\n            # print("The current number is", i)\n            time.sleep(.01)\n        # self.assertEqual(1, d[\'x1\'])\n\n        # assert False\n        pass\n\n    def test_weights(self):\n        """\n            Hints:\n            * Try harder!\n   
         * Check the chapter on linear regression.\n        """\n        n = 3\n        m = 2\n        np.random.seed(5)\n        # from numpy import asdfaskdfj\n        # X = np.random.randn(n, m)\n        # y = np.random.randn(n)\n        foo()\n        # assert 2 == 3\n        # raise Exception("Bad exit")\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertL2(linear_regression_weights(X, y), msg="the message")\n        self.assertEqual(1, 1)\n        # self.assertEqual(1,2)\n        return "THE RESULT OF THE TEST"\n\n\nimport cs108\nclass Report2(Report):\n    title = "CS 101 Report 2"\n    questions = [\n        (Numpy, 10),\n        ]\n    pack_imports = [cs108]'
-report1_payload = '8004955c020000000000007d94288c054e756d7079947d942868018c0a7365745570436c6173739486948c0474696d65948694473f5a7c000000000068018c08746573745f6261649486948c057469746c6594869468076801680786948c066173736572749486947d9468016807869468058694473ff041eac000000068018c0c746573745f77656967687473948694680986946811680168118694680c86947d9468016811869468058694473f4d120000000000758c06636f6e666967947d948c13626c616b6532625f66696c655f686173686573945d94288c806533626432393138326330346430393339383337663665656532383132353463633933316664663433633765663532623139303636636161653463623836343739636131303266323234623536353565313732336462306264383035323931303538313161336561626364396234616366663139366435396332386532666261948c803765633535633764313137383538356537343162346564653165353764353030393433646539303935326361636331666662643036633435636232613163666561663962613636383032383562643235323062343166623933373061646231643330633531386261383737363935373031333239653562383534663934373536948c803862366232646531396233353632346166633863653935343566303639643361383765313131343766346562623461666163333734313435626639656663636334336165346637623535383164633166393133363264653030636332633134366162393631633736363931356331636663356231363662356536383333353831946573752e'
+report1_source = '# from unitgrade import hide\n# from unitgrade import utils\n# import os\n# import lzma\n# import pickle\n\n# DONT\'t import stuff here since install script requires __version__\n\n# def cache_write(object, file_name, verbose=True):\n#     # raise Exception("bad")\n#     # import compress_pickle\n#     dn = os.path.dirname(file_name)\n#     if not os.path.exists(dn):\n#         os.mkdir(dn)\n#     if verbose: print("Writing cache...", file_name)\n#     with lzma.open(file_name, \'wb\', ) as f:\n#         pickle.dump(object, f)\n#     if verbose: print("Done!")\n#\n#\n# def cache_exists(file_name):\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     return os.path.exists(file_name)\n#\n#\n# def cache_read(file_name):\n#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     if os.path.exists(file_name):\n#         try:\n#             with lzma.open(file_name, \'rb\') as f:\n#                 return pickle.load(f)\n#         except Exception as e:\n#             print("Tried to load a bad pickle file at", file_name)\n#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n#             print(e)\n#             # return pickle.load(f)\n#     else:\n#         return None\n\n\n\nimport re\nimport sys\nimport threading\nimport time\nimport lzma\nimport hashlib\nimport pickle\nimport base64\nfrom collections import namedtuple\nfrom io import StringIO\nimport numpy as np\nimport tqdm\nfrom colorama import Fore\nfrom functools import _make_key\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\n\ndef gprint(s):\n    print(f"{Fore.LIGHTGREEN_EX}{s}")\n\n\nmyround = lambda x: np.round(x)  # required for obfuscation.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\n"""\nClean up the various 
output-related helper classes.\n"""\nclass Logger(object):\n    def __init__(self, buffer, write_to_stdout=True):\n        # assert False\n        self.terminal = sys.stdout\n        self.write_to_stdout = write_to_stdout\n        self.log = buffer\n\n    def write(self, message):\n        if self.write_to_stdout:\n            self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\n\nclass Capturing(list):\n    def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n        self._stdout = stdout\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True):  # don\'t put arguments here.\n        self._stdout = sys.stdout if self._stdout == None else self._stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO()  # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n    def __exit__(self, *args):\n        lines = self._stringio.getvalue().splitlines()\n        txt = "\\n".join(lines)\n        numbers = extract_numbers(rm_progress_bar(txt))\n        self.extend(lines)\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n        self.output = txt\n        self.numbers = numbers\n\n\ndef rm_progress_bar(txt):\n    # More robust version. 
Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct + 1)\n            if i > 0 and l.find("|", i + 1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None, mute_stdout=False):\n        if file == None:\n            file = sys.stdout\n        self.file = file\n        self.mute_stdout = mute_stdout\n        self._running = False\n        self.title = title\n        self.dt = 0.025\n        self.n = max(1, int(np.round(t / self.dt)))\n        self.show_progress_bar = show_progress_bar\n        self.pbar = None\n\n        if start:\n            self.start()\n\n    def start(self):\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            self._stdout = sys.stdout\n            sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            print("Stopping a progress bar which is not running (class unitgrade.utils.ActiveProgress")\n            pass\n            # raise Exception("Stopping a stopped progress bar. 
")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        self.file.flush()\n\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            sys.stdout = self._stdout #= sys.stdout\n\n            # sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n        t_ = time.time()\n        for _ in range(self.n - 1):  # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n            tc = time.time()\n            tic = max(0, self.dt - (tc - t_))\n            if tic > 0:\n                time.sleep(tic)\n            t_ = time.time()\n            self.pbar.update(1)\n\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n    if file == None:\n        file = sys.stdout\n    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n    print(first + dot_parts, end="", file=file)\n    last += extra\n    print(last, file=file)\n\n\ndef hide(func):\n    return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply 
foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    return newDecorator\n\n\nhide = makeRegisteringDecorator(hide)\n\n\ndef extract_numbers(txt):\n    numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))\n    return all\n\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n        # print(self._cache.keys())\n        # for k in self._cache:\n        #     print(k)\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n            # This appears to be required since there are two caches. 
Otherwise, when deploy method is run twice,\n            # the cache will not be set correctly.\n            self._cache_put(key, value)\n        return value\n\n    return wrapper\n\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n""" Methods responsible for turning a dictionary into a string that can be pickled or put into a json file. """\ndef dict2picklestring(dd):\n    """\n    Turns a dictionary into a string with some compression.\n\n    :param dd:\n    :return:\n    """\n    b = lzma.compress(pickle.dumps(dd))\n    b_hash = hashlib.blake2b(b).hexdigest()\n    return base64.b64encode(b).decode("utf-8"), b_hash\n\ndef picklestring2dict(picklestr):\n    """ Reverse of the above method: Turns the string back into a dictionary. """\n    b = base64.b64decode(picklestr)\n    hash = hashlib.blake2b(b).hexdigest()\n    dictionary = pickle.loads(lzma.decompress(b))\n    return dictionary, hash\n\ntoken_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70\ndef load_token(file_in):\n    """ We put this one here to allow loading of token files for the dashboard. 
"""\n    with open(file_in, \'r\') as f:\n        s = f.read()\n    splt = s.split(token_sep)\n    data = splt[-1]\n    info = splt[-2]\n    head = token_sep.join(splt[:-2])\n    plain_text=head.strip()\n    hash, l1 = info.split(" ")\n    data = "".join( data.strip()[1:-1].splitlines() )\n    l1 = int(l1)\n    dictionary, b_hash = picklestring2dict(data)\n    assert len(data) == l1\n    assert b_hash == hash.strip()\n    return dictionary, plain_text\n\n\n\n## Key/value store related.\n\n\nimport io\nimport sys\nimport time\nimport unittest\nfrom unittest.runner import _WritelnDecorator\nimport numpy as np\n\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    number = -1  # HAcky way to set question number.\n    show_progress_bar = True\n    unmute = False # Whether to redirect stdout.\n    cc = None\n    setUpClass_time = 3 # Estimated time to run setUpClass in TestCase. Must be set externally. See key (("ClassName", "setUpClass"), "time") in _cache.\n\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # TODO: Fix here. 
probably also needs to flush stdout.\n        self.printErrorList(\'ERROR\', [(test, res[\'stderr\']) for test, res in self.errors])\n        self.printErrorList(\'FAIL\',  [(test, res[\'stderr\']) for test, res in self.failures])\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addError(test, err)\n        err = self.errors[-1][1]\n        if hasattr(sys.stdout, \'log\'):\n            stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        else:\n            stdout = ""\n        self.errors[-1] = (self.errors[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n\n        if not hasattr(self, \'item_title_print\'):\n            # In case setUpClass() fails with an error the short description may not be set. This will fix that problem.\n            self.item_title_print = test.shortDescription()\n            if self.item_title_print is None:  # In case the short description is not set either...\n                self.item_title_print = test.id()\n\n\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        err = self.failures[-1][1]\n        stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        self.failures[-1] = (self.failures[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n        self.cc_terminate(success=False)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        msg = None\n        stdout = sys.stdout.log.readlines() # Only works because we set sys.stdout to a unitgrade.Logger\n\n        if hasattr(test, \'_get_outcome\'):\n            o = test._get_outcome()\n            if 
isinstance(o, dict):\n                key = (test.cache_id(), "return")\n                if key in o:\n                    msg = test._get_outcome()[key]\n\n        # print(sys.stdout.readlines())\n        self.successes.append((test, None))  # (test, message) (to be consistent with failures and errors).\n        self.successes[-1] = (self.successes[-1][0], {\'return\': msg,\n                                 \'stdout\': stdout,\n                                 \'stderr\': None})\n\n        self.cc_terminate()\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            self.cc.file.flush()\n            ss = self.item_title_print\n\n            state = "PASS" if success else "FAILED"\n\n            dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n            if self.show_progress_bar or True:\n                print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n            else:\n                print(dot_parts, end="", file=self.cc.file)\n\n            if tsecs >= 0.5:\n                state += " (" + str(tsecs) + " seconds)"\n            print(state, file=self.cc.file)\n\n    def startTest(self, test):\n        name = test.__class__.__name__\n        if self.testsRun == 0 and hasattr(test.__class__, \'_cache2\'): # Disable this if the class is pure unittest.TestCase\n            # This is the first time we are running a test. i.e. 
we can time the time taken to call setupClass.\n            if test.__class__._cache2 is None:\n                test.__class__._cache2 = {}\n            test.__class__._cache2[((name, \'setUpClass\'), \'time\')] = time.time() - self.t_start\n\n        self.testsRun += 1\n        item_title = test.shortDescription()  # Better for printing (get from cache).\n\n        if item_title == None:\n            # For unittest framework where getDescription may return None.\n            item_title = self.getDescription(test)\n        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n        # if self.show_progress_bar or True:\n        estimated_time = test.__class__._cache.get(((name, test._testMethodName), \'time\'), 100) if hasattr(test.__class__, \'_cache\') else 4\n        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        # else:\n        #     print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n        self._test = test\n        # if not self.unmute:\n        self._stdout = sys.stdout # Redundant. remove later.\n        sys.stdout = Logger(io.StringIO(), write_to_stdout=self.unmute)\n\n    def stopTest(self, test):\n        # if not self.unmute:\n        buff = sys.stdout.log\n        sys.stdout = self._stdout # redundant.\n        buff.close()\n        super().stopTest(test)\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            self.t_start = time.time()\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. 
See unitgrade.framework.py>"\n\n            cc = ActiveProgress(t=self.setUpClass_time, title=q_title_print, show_progress_bar=self.show_progress_bar, mute_stdout=not self.unmute)\n            self.cc = cc\n\n\n    def _restoreStdout(self):  # Used when setting up the test.\n        if self._previousTestClass is None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            if self.show_progress_bar:\n                print(self.cc.title, end="")\n            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        stream = io.StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\nimport importnb\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport inspect\nimport colorama\nimport unittest\nimport time\nimport textwrap\nimport urllib.parse\nimport requests\nimport ast\nimport numpy\nfrom diskcache import Cache\n\ncolorama.init(autoreset=True)  # auto resets your settings after every output\nnumpy.seterr(all=\'raise\')\n\n\ndef setup_dir_by_class(C, base_dir):\n    name = C.__class__.__name__\n    return base_dir, name\n\n\nclass DKPupDB:\n    def __init__(self, artifact_file, use_pupdb=True):\n        # Make a double-headed disk cache thingy.\n        self.dk = Cache(os.path.dirname(artifact_file)) # Start in this directory.\n        self.name_ = os.path.basename(artifact_file[:-5])\n        if self.name_ not in self.dk:\n            self.dk[self.name_] = dict()\n        self.use_pupdb = use_pupdb\n        if self.use_pupdb:\n            from pupdb.core import PupDB\n            self.db_ = 
PupDB(artifact_file)\n\n    def __setitem__(self, key, value):\n        if self.use_pupdb:\n            self.db_.set(key, value)\n        with self.dk.transact():\n            d = self.dk[self.name_]\n            d[key] = value\n            self.dk[self.name_] = d\n            self.dk[self.name_ + "-updated"] = True\n\n    def __getitem__(self, item):\n        v = self.dk[self.name_][item]\n        if self.use_pupdb:\n            v2 = self.db_.get(item)\n            if v != v2:\n                print("Mismatch v1, v2 for ", item)\n        return v\n\n    def keys(self): # This one is also deprecated.\n        return tuple(self.dk[self.name_].keys()) #.iterkeys())\n        # return self.db_.keys()\n\n    def set(self, item, value): # This one is deprecated.\n        self[item] = value\n\n    def get(self, item, default=None):\n        return self[item] if item in self else default\n\n    def __contains__(self, item):\n        return item in self.dk[self.name_] #keys()\n        # return item in self.dk\n\n\n_DASHBOARD_COMPLETED_MESSAGE = "Dashboard> Evaluation completed."\n\n# Consolidate this code.\nclass classmethod_dashboard(classmethod):\n    def __init__(self, f):\n        def dashboard_wrap(cls: UTestCase):\n            if not cls._generate_artifacts:\n                f(cls)\n                return\n\n            db = DKPupDB(cls._artifact_file_for_setUpClass())\n            r = np.random.randint(1000 * 1000)\n            db.set(\'run_id\', r)\n            db.set(\'coverage_files_changed\', None)\n\n            state_ = \'fail\'\n            try:\n                _stdout = sys.stdout\n                _stderr = sys.stderr\n                std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n                # Run this unittest and record all of the output.\n                # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n                # sys.stdout = 
stdout_capture\n                sys.stderr = std_capture.dummy_stderr\n                sys.stdout = std_capture.dummy_stdout\n                db.set("state", "running")\n                f(cls)\n                state_ = \'pass\'\n            except Exception as e:\n                from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n                state_ = \'fail\'\n                db.set(\'state\', state_)\n                exi = e\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                raise e\n            finally:\n                db.set(\'state\', state_)\n                std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n                sys.stdout = _stdout\n                sys.stderr = _stderr\n                std_capture.close()\n        super().__init__(dashboard_wrap)\n\nclass Report:\n    title = "report title"\n    abbreviate_questions = False # Should the test items start with \'Question ...\' or just be q1).\n    version = None # A version number of the report (1.0). Used to compare version numbers with online resources.\n    url = None  # Remote location of this problem.\n\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    _remote_check_cooldown_seconds = 1  # Seconds between remote check of report.\n    nL = 120  # Maximum line width\n    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.\n    _setup_mode = False # True if test is being run in setup-mode, i.e. 
will not fail because of bad configurations, etc.\n\n    @classmethod\n    def reset(cls):\n        for (q, _) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    @classmethod\n    def mfile(clc):\n        return inspect.getfile(clc)\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._file()), "unitgrade_data/main_config_"+ os.path.basename(self._file()[:-3]) + ".artifacts.pkl")\n\n    def _is_run_in_grade_mode(self):\n        """ True if this report is being run as part of a grade run. """\n        return self._file().endswith("_grade.py") # Not sure I love this convention.\n\n    def _import_base_relative(self):\n        if hasattr(self.pack_imports[0], \'__path__\'):\n            root_dir = self.pack_imports[0].__path__[0]\n        else:\n            root_dir = self.pack_imports[0].__file__\n\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n        relative_path = relative_path.replace("\\\\", "/")\n        return root_dir, relative_path, modules\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        for (q, _) in self.questions:\n            q.nL = self.nL  # Set maximum line length.\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest 
(nothing fancy).\n        loader = unittest.TestLoader()\n        for q, _ in self.questions:\n            start = time.time()  #\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self, with_coverage=False, verbose=True):\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = True\n                q._report = self\n        for q, _ in self.questions:\n            q._setup_answers_mode = True\n            # q._generate_artifacts = False # Disable artifact generation when the report is being set up.\n\n        evaluate_report_student(self, unmute=verbose, noprogress=not verbose, generate_artifacts=False) # Disable artifact generation.\n\n        # self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            # print(self.questions)\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                # print("q is", q())\n                report_cache[q.__qualname__] = q._cache2\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in framework.py\': True}\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = False\n\n        # report_cache is saved on a per-question basis.\n        # it could also contain additional information such as runtime metadata etc. 
This may not be appropriate to store with the invidivual questions(?).\n        # In this case, the function should be re-defined.\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n        self._config = payloads[\'config\']\n\n    def _check_remote_versions(self):\n        if self.url is None:\n            return\n        url = self.url\n        if not url.endswith("/"):\n            url += "/"\n        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"\n        if os.path.isfile(snapshot_file):\n            with open(snapshot_file, \'r\') as f:\n                t = f.read()\n                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:\n                    return\n\n        if self.url.startswith("https://gitlab"):\n            # Try to turn url into a \'raw\' format.\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            # url = self.url\n            url = url.replace("-/tree", "-/raw")\n            # print(url)\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/tree/master/examples/autolab_example_py_upload/instructor/cs102_autolab"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/report2_test.py?inline=false"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")\n            # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n            if self._is_run_in_grade_mode():\n                remote_source = requests.get(raw_url).text\n         
       with open(self._file(), \'r\') as f:\n                    local_source = f.read()\n                if local_source != remote_source:\n                    print("\\nThe local version of this report is not identical to the remote version which can be found at")\n                    print(self.url)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of grade script does not match the remote version. Please update using git pull")\n            else:\n                text = requests.get(raw_url).text\n                node = ast.parse(text)\n                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                for b in classes.body:\n                    # print(b.)\n                    if b.targets[0].id == "version":\n                        # print(b)\n                        # print(b.value)\n                        version_remote = b.value.value\n                        break\n                if version_remote != self.version:\n                    print("\\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    
print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of test on remote is {version_remote}, which is different than this version of the test {self.version}. Please update your test to the most recent version.")\n\n                for (q,_) in self.questions:\n                    qq = q(skip_remote_check=True)\n                    cfile = q._cache_file()\n\n                    relpath = os.path.relpath(cfile, os.path.dirname(self._file()))\n                    relpath = relpath.replace("\\\\", "/")\n                    raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")\n                    # requests.get(raw_url)\n\n                    with open(cfile, \'rb\') as f:\n                        b1 = f.read()\n\n                    b2 = requests.get(raw_url).content\n                    if b1 != b2:\n                        print("\\nQuestion ", qq.title, "relies on the data file", cfile)\n                        print("However, it appears that this file is missing or in a different version than the most recent found here:")\n                        print(self.url)\n                        print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                        print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                        print("This can be done by simply running the command")\n                        print("> git pull")\n                        print("to avoid running bad tests against good code, the program will now stop. Please update and good luck!")\n                        raise Exception("The data file for the question", qq.title, "did not match remote source found on git. The test will therefore automatically fail. 
Please update your test/data files.")\n\n                t = time.time()\n                if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):\n                    with open(snapshot_file, \'w\') as f:\n                        f.write(f"{t}")\n\ndef get_hints(ss):\n    """ Extract all blocks of the forms:\n\n    Hints:\n    bla-bla.\n\n    and returns the content unaltered.\n    """\n    if ss == None:\n        return None\n    try:\n        ss = textwrap.dedent(ss)\n        ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n        hints = ["hints:", "hint:"]\n        indexes = [ss.lower().find(h) for h in hints]\n        j = np.argmax(indexes)\n        if indexes[j] == -1:\n            return None\n        h = hints[j]\n        ss = ss[ss.lower().find(h) + len(h) + 1:]\n        ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n        ss = textwrap.dedent(ss).strip()\n        # if ss.startswith(\'*\'):\n        #     ss = ss[1:].strip()\n        return ss\n    except Exception as e:\n        print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n    # a = 234\n    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache. Ensures method always produce same result.\n    _cache2 = None  # User-written cache.\n    _with_coverage = False\n    _covcache = None # Coverage cache. Written to if _with_coverage is true.\n    _report = None  # The report used. This is very, very hacky and should always be None. Don\'t rely on it!\n    _run_in_report_mode = True\n\n    _generate_artifacts = True # Whether the file will generate the artifact .json files. This is used in the _grade-script mode.\n    # If true, the tests will not fail when cache is used. 
This is necesary since otherwise the cache will not be updated\n    # during setup, and the deploy script must be run many times.\n    _setup_answers_mode = False\n\n    def capture(self):\n        if hasattr(self, \'_stdout\') and self._stdout is not None:\n            file = self._stdout\n        else:\n            file = sys.stdout\n        return Capturing2(stdout=file)\n\n    @classmethod\n    def question_title(cls):\n        """ Return the question title """\n        if cls.__doc__ is not None:\n            title = cls.__doc__.strip().splitlines()[0].strip()\n            if not (title.startswith("Hints:") or title.startswith("Hint:") ):\n                return title\n        return cls.__qualname__\n\n    def run(self, result):\n        # print("Run called in test framework...", self._generate_artifacts)\n        if not self._generate_artifacts:\n            return super().run(result)\n        from unittest.case import TestCase\n\n\n        db = DKPupDB(self._artifact_file())\n        db.set("state", "running")\n        db.set(\'run_id\', np.random.randint(1000*1000))\n        db.set(\'coverage_files_changed\', None)\n\n\n        _stdout = sys.stdout\n        _stderr = sys.stderr\n\n        std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n        # stderr_capture = StdCapturing(sys.stderr, db=db)\n        # std_err_capture = StdCapturing(sys.stderr, "stderr", db=db)\n        state_ = None\n        try:\n            # Run this unittest and record all of the output.\n            # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n            # sys.stdout = stdout_capture\n            sys.stderr = std_capture.dummy_stderr\n            sys.stdout = std_capture.dummy_stdout\n\n            result_ = TestCase.run(self, result)\n\n            from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n            # print(result_._excinfo[0])\n  
          actual_errors = []\n            for test, err in self._error_fed_during_run:\n                if err is None:\n                    continue\n                else:\n                    import traceback\n                    # traceback.print_tb(err[2])\n                    actual_errors.append(err)\n\n            if len(actual_errors) > 0:\n                ex, exi, tb = actual_errors[0]\n                exi.__traceback__ = tb\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                # db.set(\'state\', \'fail\')\n                state_ = "fail"\n            else:\n                state_ = "pass"\n        except Exception as e:\n            state_ = "fail"\n            import traceback\n            traceback.print_exc()\n            raise e\n        finally:\n            db.set(\'state\', state_)\n            std_capture.dummy_stdout.write_mute(_DASHBOARD_COMPLETED_MESSAGE)\n            sys.stdout = _stdout\n            sys.stderr = _stderr\n            std_capture.close()\n        return result_\n\n    def _callSetUp(self):\n        if self._with_coverage:\n            if self._covcache is None:\n                self._covcache = {}\n            import coverage\n            self.cov = coverage.Coverage(data_file=None)\n            self.cov.start()\n        self.setUp()\n\n    def _callTearDown(self):\n        self.tearDown()\n        # print("Teardown.")\n        if self._with_coverage:\n            # print("with cov")\n            from pathlib import Path\n            from snipper import snipper_main\n            try:\n                self.cov.stop()\n            except Exception as e:\n                print("Something went wrong while tearing down coverage test")\n                print(e)\n            data = self.cov.get_data()\n            base, _, _ = 
self._report._import_base_relative()\n            for file in data.measured_files():\n                file = os.path.normpath(file)\n                root = Path(base)\n                child = Path(file)\n                if root in child.parents:\n                    # print("Reading file", child)\n                    with open(child, \'r\') as f:\n                        s = f.read()\n                    lines = s.splitlines()\n                    garb = \'GARBAGE\'\n                    lines2 = snipper_main.censor_code(lines, keep=True)\n                    # print("\\n".join(lines2))\n                    if len(lines) != len(lines2):\n                        for k in range(len(lines)):\n                            print(k, ">", lines[k], "::::::::", lines2[k])\n                        print("Snipper failure; line lenghts do not agree. Exiting..")\n                        print(child, "len(lines) == len(lines2)", len(lines), len(lines2))\n                        import sys\n                        sys.exit()\n\n                    assert len(lines) == len(lines2)\n                    for ll in data.contexts_by_lineno(file):\n                        l = ll-1\n                        if l < len(lines2) and lines2[l].strip() == garb:\n                            # print("Got a hit at l", l)\n                            rel = os.path.relpath(child, root)\n                            cc = self._covcache\n                            j = 0\n                            for j in range(l, -1, -1):\n                                if "def" in lines2[j] or "class" in lines2[j]:\n                                    break\n                            from snipper.legacy import gcoms\n\n                            fun = lines2[j]\n                            comments, _ = gcoms("\\n".join(lines2[j:l]))\n                            if rel not in cc:\n                                cc[rel] = {}\n                            cc[rel][fun] = (l, "\\n".join(comments))\n                
            # print("found", rel, fun)\n                            self._cache_put((self.cache_id(), \'coverage\'), self._covcache)\n\n    def shortDescriptionStandard(self):\n        sd = super().shortDescription()\n        if sd is None or sd.strip().startswith("Hints:") or sd.strip().startswith("Hint:"):\n            sd = self._testMethodName\n        return sd\n\n    def shortDescription(self):\n        sd = self.shortDescriptionStandard()\n        title = self._cache_get((self.cache_id(), \'title\'), sd)\n        return title if title is not None else sd\n\n    @property\n    def title(self):\n        return self.shortDescription()\n\n    @title.setter\n    def title(self, value):\n        self._cache_put((self.cache_id(), \'title\'), value)\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        self._ensure_cache_exists()  # Make sure cache is there.\n        if self._testMethodDoc is not None:\n            self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n        self._cache2[(self.cache_id(), \'assert\')] = {}\n        res = testMethod()\n        elapsed = time.time() - t\n        self._get_outcome()[ (self.cache_id(), "return") ] = res\n        self._cache_put((self.cache_id(), "time"), elapsed)\n\n\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return c, m\n\n    def __init__(self, *args, skip_remote_check=False, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self._assert_cache_index = 0\n        # Perhaps do a sanity check here to see if the cache is up to date? 
To do that, we must make sure the\n        # cache exists locally.\n        # Find the report class this class is defined within.\n        if skip_remote_check:\n            return\n        import importlib, inspect\n        found_reports = []\n        # print("But do I have report", self._report)\n        # print("I think I am module", self.__module__)\n        # print("Importlib says", importlib.import_module(self.__module__))\n        # This will delegate you to the wrong main clsas when running in grade mode.\n        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):\n            # print("checking", cls)\n            if issubclass(cls, Report):\n                for q,_ in cls.questions:\n                    if q == self.__class__:\n                        found_reports.append(cls)\n        if len(found_reports) == 0:\n            pass # This case occurs when the report _grade script is being run.\n            # raise Exception("This question is not a member of a report. Very, very odd.")\n        if len(found_reports) > 1:\n            raise Exception("This question is a member of multiple reports. 
That should not be the case -- don\'t get too creative.")\n        if len(found_reports) > 0:\n            report = found_reports[0]\n            report()._check_remote_versions()\n\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def get_expected_test_value(self):\n        key = (self.cache_id(), \'assert\')\n        id = self._assert_cache_index\n        cache = self._cache_get(key)\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        return _expected\n\n    def wrap_assert(self, assert_fun, first, *args, **kwargs):\n        key = (self.cache_id(), \'assert\')\n        if not self._cache_contains(key):\n            print("Warning, framework missing", key)\n            self.__class__._cache[key] = {}  # A new dict. We manually insert it because we have to use that the dict is mutable.\n        cache = self._cache_get(key)\n        id = self._assert_cache_index\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        if not id in cache:\n            print("Warning, framework missing cache index", key, "id =", id, " - The test will be skipped for now.")\n            if self._setup_answers_mode:\n                _expected = first # Bypass by setting equal to first. 
This is in case multiple self.assertEqualC\'s are run in a row and have to be set.\n\n        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n        cache[id] = first\n        self._cache_put(key, cache)\n        self._assert_cache_index += 1\n        if not self._setup_answers_mode:\n            assert_fun(first, _expected, *args, **kwargs)\n        else:\n            try:\n                assert_fun(first, _expected, *args, **kwargs)\n            except Exception as e:\n                print("Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.", id)\n                print("> first", first)\n                print("> expected", _expected)\n                print(e)\n\n\n    def assertEqualC(self, first, msg=None):\n        self.wrap_assert(self.assertEqual, first, msg)\n\n    def _shape_equal(self, first, second):\n        a1 = np.asarray(first).squeeze()\n        a2 = np.asarray(second).squeeze()\n        msg = None\n        msg = "" if msg is None else msg\n        if len(msg) > 0:\n            msg += "\\n"\n        self.assertEqual(a1.shape, a2.shape, msg=msg + "Dimensions of input data does not agree.")\n        assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.\n        a1[np.isinf(a1)] = 0\n        a2[np.isinf(a2)] = 0\n        diff = np.abs(a1 - a2)\n        return diff\n\n    def assertLinf(self, first, second=None, tol=1e-5, msg=None):\n        """ Test in the L_infinity norm.\n        :param first:\n        :param second:\n        :param tol:\n        :param msg:\n        :return:\n        """\n        if second is None:\n            return self.wrap_assert(self.assertLinf, first, tol=tol, msg=msg)\n        else:\n            diff = self._shape_equal(first, second)\n            np.testing.assert_allclose(first, second, atol=tol)\n            \n            max_diff = max(diff.flat)\n            if 
max_diff >= tol:\n                from unittest.util import safe_repr\n                # msg = f\'{safe_repr(first)} != {safe_repr(second)} : Not equal within tolerance {tol}\'\n                # print(msg)\n                # np.testing.assert_almost_equal\n                # import numpy as np\n                print(f"|first - second|_max = {max_diff} > {tol} ")\n                np.testing.assert_almost_equal(first, second)\n                # If the above fail, make sure to throw an error:\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=f\'Not equal within tolerance {tol}\')\n\n    def assertL2(self, first, second=None, tol=1e-5, msg=None, relative=False):\n        if second is None:\n            return self.wrap_assert(self.assertL2, first, tol=tol, msg=msg, relative=relative)\n        else:\n            # We first test using numpys build-in testing method to see if one coordinate deviates a great deal.\n            # This gives us better output, and we know that the coordinate wise difference is lower than the norm difference.\n            if not relative:\n                np.testing.assert_allclose(first, second, atol=tol)\n            diff = self._shape_equal(first, second)\n            diff = ( ( np.asarray( diff.flatten() )**2).sum() )**.5\n\n            scale = (2/(np.linalg.norm(np.asarray(first).flat) + np.linalg.norm(np.asarray(second).flat)) ) if relative else 1\n            max_diff = diff*scale\n            if max_diff >= tol:\n                msg = "" if msg is None else msg\n                print(f"|first - second|_2 = {max_diff} > {tol} ")\n                # Deletage to numpy. 
Let numpy make nicer messages.\n                np.testing.assert_almost_equal(first, second) # This function does not take a msg parameter.\n                # Make sure to throw an error no matter what.\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=msg + f"Not equal within tolerance {tol}")\n\n    @classmethod\n    def _cache_file(cls):\n        return os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__ + ".pkl"\n\n    @classmethod\n    def _artifact_file_for_setUpClass(cls):\n        file = os.path.join(os.path.dirname(cls._cache_file()), ""+cls.__name__+"-setUpClass.json")\n        print("_artifact_file_for_setUpClass(cls): will return", file, "__class__", cls)\n        # cf = os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__\n        return file\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self.__class__._cache_file()), \'-\'.join(self.cache_id()) + ".json")\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self.__class__._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None:  # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. 
What is going on?!")\n        cfile = self.__class__._cache_file()\n        if os.path.exists(cfile):\n            try:\n                with open(cfile, \'rb\') as f:\n                    data = pickle.load(f)\n                self.__class__._cache = data\n            except Exception as e:\n                print("Cache file did not exist:", cfile)\n                print(e)\n        else:\n            print("Warning! data file not found", cfile)\n\n    def _get_coverage_files(self):\n        key = (self.cache_id(), \'coverage\')\n        # CC = None\n        # if self._cache_contains(key):\n        return self._cache_get(key, []) # Anything wrong with the empty list?\n        # return CC\n\n    def _get_hints(self):\n        """\n            This code is run when the test is set up to generate the hints and store them in an artifact file. It may be beneficial to simple compute them beforehand\n            and store them in the local unitgrade pickle file. This code is therefore expected to superceede the alterative code later.\n        """\n        hints = []\n        # print("Getting hint")\n        key = (self.cache_id(), \'coverage\')\n        if self._cache_contains(key):\n            CC = self._cache_get(key)\n            # cl, m = self.cache_id()\n            # print("Getting hint using", CC)\n            # Insert newline to get better formatting.\n            # gprint(\n            #     f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n            for file in CC:\n                rec = CC[file]\n                # gprint(f">   * {file}")\n                for l in rec:\n                    _, comments = CC[file][l]\n                    hint = get_hints(comments)\n\n                    if hint != None:\n                        hints.append((hint, file, l))\n\n        doc = self._testMethodDoc\n        # print("doc", doc)\n        if doc is not None:\n            hint = get_hints(self._testMethodDoc)\n            if hint is not None:\n                hints = [(hint, None, self.cache_id()[1])] + hints\n\n        return hints\n\n    def _feedErrorsToResult(self, result, errors):\n        """ Use this to show hints on test failure.\n        It feeds error to the result -- so if there are errors, they will crop up here\n        """\n        self._error_fed_during_run = errors.copy() # import to copy the error list.\n\n        # result._test._error_fed_during_run = errors.copy()\n\n        if not isinstance(result, UTextResult):\n            er = [e for e, v in errors if v != None]\n            # print("Errors are", errors)\n            if len(er) > 0:\n                hints = []\n                key = (self.cache_id(), \'coverage\')\n                if self._cache_contains(key):\n                    CC = self._cache_get(key)\n                    cl, m = self.cache_id()\n                    # Insert newline to get better formatting.\n                    gprint(f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n                    for file in CC:\n                        rec = CC[file]\n                        gprint(f">   * {file}")\n                        for l in rec:\n                            _, comments = CC[file][l]\n                            hint = get_hints(comments)\n\n                            if hint != None:\n                                hints.append((hint, file, l) )\n                            gprint(f">      - {l}")\n\n                er = er[0]\n\n                doc = er._testMethodDoc\n                # print("doc", doc)\n                if doc is not None:\n                    hint = get_hints(er._testMethodDoc)\n                    if hint is not None:\n                        hints = [(hint, None, self.cache_id()[1] )] + hints\n                if len(hints) > 0:\n                    # print(hints)\n                    for hint, file, method in hints:\n                        s = (f"\'{method.strip()}\'" if method is not None else "")\n                        if method is not None and file is not None:\n                            s += " in "\n                        try:\n                            s += (file.strip() if file is not None else "")\n                            gprint(">")\n                            gprint("> Hints (from " + s + ")")\n                            gprint(textwrap.indent(hint, ">   "))\n                        except Exception as e:\n                            print("Bad stuff in hints. 
")\n                            print(hints)\n        # result._last_errors = errors\n        super()._feedErrorsToResult(result, errors)\n        b = 234\n\n    def startTestRun(self):\n        super().startTestRun()\n\nclass Required:\n    pass\n\nclass ParticipationTest(UTestCase,Required):\n    max_group_size = None\n    students_in_group = None\n    workload_assignment = {\'Question 1\': [1, 0, 0]}\n\n    def test_students(self):\n        pass\n\n    def test_workload(self):\n        pass\n\n# 817, 705\nclass NotebookTestCase(UTestCase):\n    notebook = None\n    _nb = None\n    @classmethod\n    def setUpClass(cls) -> None:\n        with Capturing():\n            cls._nb = importnb.Notebook.load(cls.notebook)\n\n    @property\n    def nb(self):\n        return self.__class__._nb\n # 870.\n\nimport hashlib\nimport io\nimport tokenize\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,\n                            show_tol_err=False, show_privisional=True, noprogress=None,\n                            generate_artifacts=True):\n    args = parser.parse_args()\n    if noprogress is None:\n        noprogress = args.noprogress\n\n    if question is None and args.q is not None:\n        question = args.q\n        if "." 
in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,\n                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err,\n                                          generate_artifacts=generate_artifacts)\n\n\n    if question is None and show_privisional:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        # testcase_methods = list(testCaseClass.__dict__.keys())\n        ls = []\n        for C in testCaseClass.mro():\n            if issubclass(C, unittest.TestCase):\n                ls = list(C.__dict__.keys()) + ls\n        testcase_methods = ls\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    generate_artifacts=True, # Generate the artifact .json files. 
These are exclusively used by the dashboard.\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n    # print("Started: " + dt_string)\n    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += f" version {report.version}"\n    print(s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n        if not report.abbreviate_questions:\n            q_title_print = "Question %i: %s"%(n+1, qtitle)\n        else:\n            q_title_print = "q%i) %s" % (n + 1, qtitle)\n\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        # q_ = {} # Gather score in this class.\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n        UTextResult.number = n\n        UTextResult.nL = report.nL\n        UTextResult.unmute = unmute # Hacky as well.\n        UTextResult.setUpClass_time = q._cache.get(((q.__name__, 
\'setUpClass\'), \'time\'), 3) if hasattr(q, \'_cache\') and q._cache is not None else 3\n\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        details = {}\n        for s, msg in res.successes + res.failures + res.errors:\n            # from unittest.suite import _ErrorHolder\n            # from unittest import _Err\n            # if isinstance(s, _ErrorHolder)\n            if hasattr(s, \'_testMethodName\'):\n                key = (q.__name__, s._testMethodName)\n            else:\n                # In case s is an _ErrorHolder (unittest.suite)\n                key = (q.__name__, s.id())\n            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.\n\n            detail = {}\n            if (s,msg) in res.successes:\n                detail[\'status\'] = "pass"\n            elif (s,msg) in res.failures:\n                detail[\'status\'] = \'fail\'\n            elif (s,msg) in res.errors:\n                detail[\'status\'] = \'error\'\n            else:\n                raise Exception("Status not known.")\n\n            nice_title = s.title\n            detail = {**detail, **msg, \'nice_title\': nice_title}#[\'message\'] = msg\n            details[key] = detail\n\n        # q_[s._testMethodName] = ("pass", None)\n        # for (s,msg) in res.failures:\n        #     q_[s._testMethodName] = ("fail", msg)\n        # for (s,msg) in res.errors:\n        #     q_[s._testMethodName] = ("error", msg)\n        # res.successes[0]._get_outcome()\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': details, \'title\': qtitle, \'name\': q.__name__,\n                   }\n        q.obtained = 
obtained\n        q.possible = possible\n        # print(q._cache)\n        # print(q._covcache)\n        s1 = f" * q{n+1})   Total"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",\n           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\ndef python_code_str_id(python_code, strip_comments_and_docstring=True):\n    s = python_code\n\n    if strip_comments_and_docstring:\n        try:\n            s = remove_comments_and_docstrings(s)\n        except Exception as e:\n            print("--"*10)\n            print(python_code)\n            print(e)\n\n    s = "".join([c.strip() for c in s.split()])\n    hash_object = hashlib.blake2b(s.encode())\n    return hash_object.hexdigest()\n\n\ndef file_id(file, strip_comments_and_docstring=True):\n    with open(file, \'r\') as f:\n        # s = f.read()\n        return python_code_str_id(f.read())\n\n\ndef remove_comments_and_docstrings(source):\n    """\n    Returns \'source\' minus comments and docstrings.\n    """\n    io_obj = io.StringIO(source)\n    out = ""\n    prev_toktype = tokenize.INDENT\n    last_lineno = -1\n    last_col = 0\n    for tok in tokenize.generate_tokens(io_obj.readline):\n        token_type = tok[0]\n        token_string = tok[1]\n        start_line, start_col = tok[2]\n        end_line, end_col = tok[3]\n        ltext = tok[4]\n        # The following two conditionals preserve indentation.\n        # This is necessary because we\'re not using tokenize.untokenize()\n        # (because it spits out code with copious amounts of oddly-placed\n        # whitespace).\n        if start_line > last_lineno:\n            last_col = 0\n        if start_col > last_col:\n            out += (" " * (start_col - last_col))\n        # Remove comments:\n        if token_type == tokenize.COMMENT:\n            pass\n        # This series of conditionals removes docstrings:\n        elif token_type == tokenize.STRING:\n            if prev_toktype != tokenize.INDENT:\n        # This is likely a docstring; double-check we\'re not inside an operator:\n           
     if prev_toktype != tokenize.NEWLINE:\n                    # Note regarding NEWLINE vs NL: The tokenize module\n                    # differentiates between newlines that start a new statement\n                    # and newlines inside of operators such as parens, brackes,\n                    # and curly braces.  Newlines inside of operators are\n                    # NEWLINE and newlines that start new code are NL.\n                    # Catch whole-module docstrings:\n                    if start_col > 0:\n                        # Unlabelled indentation means we\'re inside an operator\n                        out += token_string\n                    # Note regarding the INDENT token: The tokenize module does\n                    # not label indentation inside of an operator (parens,\n                    # brackets, and curly braces) as actual indentation.\n                    # For example:\n                    # def foo():\n                    #     "The spaces before this docstring are tokenize.INDENT"\n                    #     test = [\n                    #         "The spaces before this string do not get a token"\n                    #     ]\n        else:\n            out += token_string\n        prev_toktype = token_type\n        last_col = end_col\n        last_lineno = end_line\n    return out\n\nimport textwrap\nimport bz2\nimport pickle\nimport os\nimport zipfile\nimport io\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    f = m.__file__\n    if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'):\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        im = __import__(m.__name__.split(\'.\')[0])\n        if isinstance(im, list):\n            print("im is a list")\n            print(im)\n        # the __path__ attribute *may* be a 
string in some cases. I had to fix this.\n        print("path.:",  __import__(m.__name__.split(\'.\')[0]).__path__)\n        # top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__[0]\n        module_import = False\n\n    found_hashes = {}\n    # pycode = {}\n    resources[\'pycode\'] = {}\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)\n                    zip.write(fpath, v)\n                    if not fpath.endswith("_grade.py"): # Exclude grade files.\n                        with open(fpath, \'r\') as f:\n                            s = f.read()\n                        found_hashes[v] = python_code_str_id(s)\n                        resources[\'pycode\'][v] = s\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    resources[\'blake2b_file_hashes\'] = found_hashes\n    return resources, top_package\n\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_report_source_include(report):\n    sources = {}\n    # print("")\n    # if not args.autolab:\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            _, report_relative_location, module_import = report._import_base_relative()\n\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'report_module_specification\'] = module_import\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            print(f" * {m.__name__}")\n    return sources\n\ndef gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):\n    # n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n       
                                   show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab,\n                                          generate_artifacts=False,\n                                          )\n    print("")\n    sources = {}\n    if not args.autolab:\n        results[\'sources\'] = sources = gather_report_source_include(report)\n\n    token_plain = """\n# This file contains your results. Do not edit its content. Simply upload it as it is. """\n\n    s_include = [token_plain]\n    known_hashes = []\n    cov_files = []\n    use_coverage = True\n    if report._config is not None:\n        known_hashes = report._config[\'blake2b_file_hashes\']\n        for Q, _ in report.questions:\n            use_coverage = use_coverage and isinstance(Q, UTestCase)\n            for key in Q._cache:\n                if len(key) >= 2 and key[1] == "coverage":\n                    for f in Q._cache[key]:\n                        cov_files.append(f)\n\n    for s in sources.values():\n        for f_rel, hash in s[\'blake2b_file_hashes\'].items():\n            if hash in known_hashes and f_rel not in cov_files and use_coverage:\n                print("Skipping", f_rel)\n            else:\n                if token_include_plaintext_source:\n                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)\n                    s_include.append("")\n                    s_include.append(s[\'pycode\'][f_rel])\n                    s_include.append("")\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = f"_v{report.version}" if report.version is not None else ""\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.normpath(os.path.join(output_dir, token))\n\n    save_token(results, "\\n".join(s_include), token)\n\n    if not 
args.autolab:\n        print("> Testing token file integrity...", sep="")\n        load_token(token)\n        print("Done!")\n        print(" ")\n        print("To get credit for your results, please upload the single unmodified file: ")\n        print(">", token)\n\n\ndef save_token(dictionary, plain_text, file_out):\n    if plain_text is None:\n        plain_text = ""\n    if len(plain_text) == 0:\n        plain_text = "Start token file"\n    plain_text = plain_text.strip()\n    b, b_hash = dict2picklestring(dictionary)\n    b_l1 = len(b)\n    b = "."+b+"."\n    b = "\\n".join( textwrap.wrap(b, 180))\n\n    out = [plain_text, token_sep, f"{b_hash} {b_l1}", token_sep, b]\n    with open(file_out, \'w\') as f:\n        f.write("\\n".join(out))\n\n\n\n\ndef source_instantiate(name, report1_source, payload):\n    # print("Executing sources", report1_source)\n    eval("exec")(report1_source, globals())\n    # print("Loaind gpayload..")\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    return report\n\n\n__version__ = "0.1.28.8"\n\nfrom cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo\nimport time\nimport numpy as np\nimport pickle\nimport os\n# from unitgrade.framework import dash\n\ndef mk_bad():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 100, \'x2\': 300}\n        pickle.dump(d, f)\n\ndef mk_ok():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 1, \'x2\': 2}\n        pickle.dump(d, f)\n\nclass Numpy(UTestCase):\n    z = 234\n\n    # def __getattr__(self, item):\n    #     print("hi there ", item)\n    #     return super().__getattr__(item)\n    #\n    # def __getattribute__(self, item):\n    #     print("oh hello sexy. 
", item)\n    #     return super().__getattribute__(item)\n\n    @classmethod_dashboard\n    def setUpClass(cls) -> None:\n        print("Dum di dai, I am running some setup code here.")\n        for i in range(10):\n            print("Hello world", i)\n        print("Set up.") # must be handled seperately.\n        # assert False\n\n    # @cache\n    # def make_primes(self, n):\n    #     return primes(n)\n\n    # def setUp(self) -> None:\n    #     print("We are doing the setup thing.")\n\n    def test_bad(self):\n        """\n        Hints:\n            * Remember to properly de-indent your code.\n            * Do more stuff which works.\n        """\n        # raise Exception("This ended poorly")\n        # print("Here we go")\n        # return\n        # self.assertEqual(1, 1)\n        with open(os.path.dirname(__file__)+"/db.pkl", \'rb\') as f:\n            d = pickle.load(f)\n        # print(d)\n        # assert False\n        # for i in range(10):\n        from tqdm import tqdm\n        for i in tqdm(range(100)):\n            # print("The current number is", i)\n            time.sleep(.01)\n        self.assertEqual(1, d[\'x1\'])\n        for b in range(10):\n            self.assertEqualC(add(3, b))\n\n\n    def test_weights(self):\n        """\n            Hints:\n            * Try harder!\n            * Check the chapter on linear regression.\n        """\n        n = 3\n        m = 2\n        np.random.seed(5)\n        # from numpy import asdfaskdfj\n        # X = np.random.randn(n, m)\n        # y = np.random.randn(n)\n        foo()\n        # assert 2 == 3\n        # raise Exception("Bad exit")\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertL2(linear_regression_weights(X, y), msg="the message")\n        self.assertEqual(1, 1)\n        # self.assertEqual(1,2)\n        return "THE RESULT OF THE TEST"\n\n\nclass AnotherTest(UTestCase):\n    def test_more(self):\n        
self.assertEqual(2,2)\n\n    def test_even_more(self):\n        self.assertEqual(2,2)\n\nimport cs108\nclass Report2(Report):\n    title = "CS 101 Report 2"\n    questions = [\n        (Numpy, 10), (AnotherTest, 20)\n        ]\n    pack_imports = [cs108]'
+report1_payload = '80049502030000000000007d94288c054e756d7079947d942868018c0a7365745570436c6173739486948c0474696d65948694473f38e8000000000068018c08746573745f6261649486948c057469746c6594869468076801680786948c066173736572749486947d94284b004b034b014b044b024b054b034b064b044b074b054b084b064b094b074b0a4b084b0b4b094b0c7568016807869468058694473ff06c5e0000000068018c0c746573745f77656967687473948694680986946811680168118694680c86947d9468016811869468058694473efa400000000000758c0b416e6f7468657254657374947d942868196803869468058694473f1470000000000068198c09746573745f6d6f7265948694680c86947d946819681d869468058694473ed700000000000068198c0e746573745f6576656e5f6d6f7265948694680c86947d9468196823869468058694473ed5000000000000758c06636f6e666967947d948c13626c616b6532625f66696c655f686173686573945d94288c806362363363336235383635306636313037643763663138646136303635666135373835666261626564643135316639653761633335313139323635623039393838623266653335373632303961333932616133656236633134636131316439646335393937343831633531373863313533393665656662313539653163373536948c803434656331613338643134373639626433653234323663386232366539303830356336313361386161653266333966663665633433363133666562363465303739373435323062306536353134353063303637623763633637636631366134313835653736346334383331373763333335303063626563626362336234646466948c803638306336353638323633623832303737313365616434306539323663643265363835336130613936353861386338343738393564363633643730643262343666616163333336396133636564366239623964303436346563316366656465326235306265376432626636313432313638383936663332306338353232313066946573752e'
 name="Report2"
 
 report = source_instantiate(name, report1_source, report1_payload)
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json
new file mode 100644
index 0000000000000000000000000000000000000000..e01b626d6395d114075df584e3b2822b1eda02d1
--- /dev/null
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json
@@ -0,0 +1 @@
+{"state": "pass", "run_id": 863304, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json
new file mode 100644
index 0000000000000000000000000000000000000000..710d65e381eb837b29dd244b6a14b8dc43e8fa75
--- /dev/null
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json
@@ -0,0 +1 @@
+{"state": "pass", "run_id": 282722, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest-test_more.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..1a07e6f6b3f4993e4e5fe85ae58a067520f4eade
Binary files /dev/null and b/devel/example_devel/instructor/cs108/unitgrade_data/AnotherTest.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json
new file mode 100644
index 0000000000000000000000000000000000000000..a46f0a4507d0b006f7c9702b473ccfe098f07e4c
--- /dev/null
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json
@@ -0,0 +1 @@
+{"run_id": 188727, "coverage_files_changed": null, "stdout": [[0, "Dum di dai, I am running some setup code here.\nHello world 0\nHello world 1\nHello world 2\nHello world 3\nHello world 4\nHello world 5\nHello world 6\nHello world 7\nHello world 8\nHello world 9\nSet up.\nDashboard> Evaluation completed."]], "state": "pass"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-setUpClass.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json
index a29717137fba18d66bf5a19895a0ffed6954aac1..4ecb597ff913319a6b1de4e7d745cf63a6972bfd 100644
--- a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json
@@ -1 +1 @@
-{"run_id": 578953, "state": "fail", "coverage_files_changed": null, "stdout": [[0, "oh hello sexy.  _testMethodName\noh hello sexy.  test_bad\noh hello sexy.  __class__\noh hello sexy.  __unittest_expecting_failure__\nhi there  __unittest_expecting_failure__\noh hello sexy.  _callSetUp\noh hello sexy.  _with_coverage\noh hello sexy.  setUp\noh hello sexy.  _callTestMethod\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _testMethodDoc\noh hello sexy.  _cache_put\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  shortDescriptionStandard\noh hello sexy.  _testMethodDoc\noh hello sexy.  _testMethodName\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _cache2\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  
_testMethodName\n\u001b[31m\r  0%|          | 0/100 [00:00<?, ?it/s]\u001b[37m"], [1, "\u001b[31m\r 10%|#         | 10/100 [00:00<00:00, 93.07it/s]\u001b[37m\u001b[31m\r 20%|##        | 20/100 [00:00<00:00, 91.92it/s]\u001b[37m"], [2, "\u001b[31m\r 30%|###       | 30/100 [00:00<00:00, 91.77it/s]\u001b[37m\u001b[31m\r 40%|####      | 40/100 [00:00<00:00, 92.25it/s]\u001b[37m"], [3, "\u001b[31m\r 50%|#####     | 50/100 [00:00<00:00, 92.44it/s]\u001b[37m"], [4, "\u001b[31m\r 60%|######    | 60/100 [00:00<00:00, 91.07it/s]\u001b[37m\u001b[31m\r 70%|#######   | 70/100 [00:00<00:00, 90.76it/s]\u001b[37m"], [5, "\u001b[31m\r 80%|########  | 80/100 [00:00<00:00, 89.57it/s]\u001b[37m"], [6, "\u001b[31m\r 89%|########9 | 89/100 [00:01<00:00, 83.70it/s]\u001b[37m\u001b[31m\r 99%|#########9| 99/100 [00:01<00:00, 85.69it/s]\u001b[37m\u001b[31m\u001b[37m\u001b[31m\r100%|##########| 100/100 [00:01<00:00, 88.82it/s]\u001b[37m\u001b[31m\n\u001b[37moh hello sexy.  assertEqual\noh hello sexy.  _getAssertEqualityFunc\noh hello sexy.  _type_equality_funcs\noh hello sexy.  _baseAssertEqual\noh hello sexy.  assertEqualC\noh hello sexy.  wrap_assert\noh hello sexy.  assertEqual\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  _cache_contains\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _cache_get\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _assert_cache_index\nWarning, framework missing cache index (('Numpy', 'test_bad'), 'assert') id = 0  - The test will be skipped for now.\noh hello sexy.  _setup_answers_mode\noh hello sexy.  _cache_put\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  
__class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _assert_cache_index\noh hello sexy.  _setup_answers_mode\noh hello sexy.  _getAssertEqualityFunc\noh hello sexy.  _baseAssertEqual\noh hello sexy.  _formatMessage\noh hello sexy.  longMessage\noh hello sexy.  failureException\noh hello sexy.  _callTearDown\noh hello sexy.  tearDown\noh hello sexy.  _with_coverage\noh hello sexy.  doCleanups\noh hello sexy.  _outcome\noh hello sexy.  _cleanups\noh hello sexy.  _feedErrorsToResult\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  _cache_contains\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _testMethodDoc\noh hello sexy.  _testMethodDoc\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\n\u001b[92m>\n\u001b[92m> Hints (from 'test_bad')\n\u001b[92m>   * Remember to properly de-indent your code.\n>   * Do more stuff which works.\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  failureException\noh hello sexy.  failureException\noh hello sexy.  __class__\n"], [7, "oh hello sexy.  
_error_fed_during_run\n\u001b[31mTraceback (most recent call last):\n  File \"C:\\Users\\tuhe\\AppData\\Local\\Programs\\Python\\Python310\\lib\\unittest\\case.py\", line 59, in testPartExecutor\n    yield\n  File \"C:\\Users\\tuhe\\AppData\\Local\\Programs\\Python\\Python310\\lib\\unittest\\case.py\", line 591, in run\n    self._callTestMethod(testMethod)\n  File \"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\", line 516, in _callTestMethod\n    res = testMethod()\n  File \"C:\\Users\\tuhe\\Documents\\unitgrade_private\\devel\\example_devel\\instructor\\cs108\\report_devel.py\", line 75, in test_bad\n    self.assertEqualC(add(3, b))\n  File \"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\", line 613, in assertEqualC\n    self.wrap_assert(self.assertEqual, first, msg)\n  File \"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\", line 601, in wrap_assert\n    assert_fun(first, _expected, *args, **kwargs)\nAssertionError: 3 != 'Key 0 not found in cache; framework files missing. 
Please run deploy()'\n\u001b[37m"]], "wz_stacktrace": "<div class=\"traceback\">\n  <h3>Traceback <em>(most recent call last)</em>:</h3>\n  <ul><li><div class=\"frame\" id=\"frame-2375040634096\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\AppData\\Local\\Programs\\Python\\Python310\\lib\\unittest\\case.py\"</cite>,\n      line <em class=\"line\">59</em>,\n      in <code class=\"function\">testPartExecutor</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">    </span>@contextlib.contextmanager</pre>\n<pre class=\"line before\"><span class=\"ws\">    </span>def testPartExecutor(self, test_case, isTest=False):</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>old_success = self.success</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self.success = True</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>try:</pre>\n<pre class=\"line current\"><span class=\"ws\">            </span>yield</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except KeyboardInterrupt:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>raise</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except SkipTest as e:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.success = False</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.skipped.append((test_case, str(e)))</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-2375040861680\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\AppData\\Local\\Programs\\Python\\Python310\\lib\\unittest\\case.py\"</cite>,\n      line <em class=\"line\">591</em>,\n      in <code class=\"function\">run</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">                </span>with outcome.testPartExecutor(self):</pre>\n<pre class=\"line before\"><span class=\"ws\">                    
</span>self._callSetUp()</pre>\n<pre class=\"line before\"><span class=\"ws\">                </span>if outcome.success:</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>outcome.expecting_failure = expecting_failure</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>with outcome.testPartExecutor(self, isTest=True):</pre>\n<pre class=\"line current\"><span class=\"ws\">                        </span>self._callTestMethod(testMethod)</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>outcome.expecting_failure = False</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>with outcome.testPartExecutor(self):</pre>\n<pre class=\"line after\"><span class=\"ws\">                        </span>self._callTearDown()</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">                </span>self.doCleanups()</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-2375040861792\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\"</cite>,\n      line <em class=\"line\">516</em>,\n      in <code class=\"function\">_callTestMethod</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span>self._ensure_cache_exists()  # Make sure cache is there.</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>if self._testMethodDoc is not None:</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>self._cache_put((self.cache_id(), &#39;title&#39;), self.shortDescriptionStandard())</pre>\n<pre class=\"line before\"><span class=\"ws\"></span> </pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self._cache2[(self.cache_id(), &#39;assert&#39;)] = {}</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>res = testMethod()</pre>\n<pre class=\"line after\"><span 
class=\"ws\">        </span>elapsed = time.time() - t</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._get_outcome()[ (self.cache_id(), &#34;return&#34;) ] = res</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._cache_put((self.cache_id(), &#34;time&#34;), elapsed)</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-2375040861904\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\Documents\\unitgrade_private\\devel\\example_devel\\instructor\\cs108\\report_devel.py\"</cite>,\n      line <em class=\"line\">75</em>,\n      in <code class=\"function\">test_bad</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span>for i in tqdm(range(100)):</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span># print(&#34;The current number is&#34;, i)</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>time.sleep(.01)</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self.assertEqual(1, d[&#39;x1&#39;])</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>for b in range(10):</pre>\n<pre class=\"line current\"><span class=\"ws\">            </span>self.assertEqualC(add(3, b))</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">    </span>def test_weights(self):</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>&#34;&#34;&#34;</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>Hints:</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-2375040862016\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\"</cite>,\n      line <em class=\"line\">613</em>,\n      in <code 
class=\"function\">assertEqualC</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">                </span>print(&#34;&gt; expected&#34;, _expected)</pre>\n<pre class=\"line before\"><span class=\"ws\">                </span>print(e)</pre>\n<pre class=\"line before\"><span class=\"ws\"></span> </pre>\n<pre class=\"line before\"><span class=\"ws\"></span> </pre>\n<pre class=\"line before\"><span class=\"ws\">    </span>def assertEqualC(self, first, msg=None):</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>self.wrap_assert(self.assertEqual, first, msg)</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">    </span>def _shape_equal(self, first, second):</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>a1 = np.asarray(first).squeeze()</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>a2 = np.asarray(second).squeeze()</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>msg = None</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-2375040862128\">\n  <h4>File <cite class=\"filename\">\"C:\\Users\\tuhe\\Documents\\unitgrade\\src\\unitgrade\\framework.py\"</cite>,\n      line <em class=\"line\">601</em>,\n      in <code class=\"function\">wrap_assert</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span># The order of these calls is important. 
If the method assert fails, we should still store the correct result in cache.</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>cache[id] = first</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self._cache_put(key, cache)</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self._assert_cache_index += 1</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>if not self._setup_answers_mode:</pre>\n<pre class=\"line current\"><span class=\"ws\">            </span>assert_fun(first, _expected, *args, **kwargs)</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>else:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>try:</pre>\n<pre class=\"line after\"><span class=\"ws\">                </span>assert_fun(first, _expected, *args, **kwargs)</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>except Exception as e:</pre>\n<pre class=\"line after\"><span class=\"ws\">                </span>print(&#34;Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.&#34;, id)</pre></div>\n</div>\n</ul>\n  <blockquote>AssertionError: 3 != &#39;Key 0 not found in cache; framework files missing. Please run deploy()&#39;\n</blockquote>\n</div>\n"}
\ No newline at end of file
+{"state": "fail", "run_id": 1789, "coverage_files_changed": null, "stdout": [[0, "\u001b[31m\r  0%|          | 0/100 [00:00<?, ?it/s]\u001b[37m"], [1, "\u001b[31m\r 10%|#         | 10/100 [00:00<00:00, 97.25it/s]\u001b[37m\u001b[31m\r 20%|##        | 20/100 [00:00<00:00, 97.27it/s]\u001b[37m"], [2, "\u001b[31m\r 30%|###       | 30/100 [00:00<00:00, 95.97it/s]\u001b[37m"], [3, "\u001b[31m\r 40%|####      | 40/100 [00:00<00:00, 95.72it/s]\u001b[37m\u001b[31m\r 50%|#####     | 50/100 [00:00<00:00, 93.34it/s]\u001b[37m"], [4, "\u001b[31m\r 60%|######    | 60/100 [00:00<00:00, 91.76it/s]\u001b[37m\u001b[31m\r 70%|#######   | 70/100 [00:00<00:00, 93.45it/s]\u001b[37m"], [5, "\u001b[31m\r 80%|########  | 80/100 [00:00<00:00, 94.95it/s]\u001b[37m\u001b[31m\r 90%|######### | 90/100 [00:00<00:00, 95.62it/s]\u001b[37m"], [6, "\u001b[31m\r100%|##########| 100/100 [00:01<00:00, 95.82it/s]\u001b[37m\u001b[31m\u001b[37m\u001b[31m\r100%|##########| 100/100 [00:01<00:00, 94.89it/s]\u001b[37m\u001b[31m\n\u001b[37m\u001b[92m>\n\u001b[92m> Hints (from 'test_bad')\n\u001b[92m>   * Remember to properly de-indent your code.\n>   * Do more stuff which works.\n\u001b[31mTraceback (most recent call last):\n  File \"/usr/lib/python3.10/unittest/case.py\", line 59, in testPartExecutor\n    yield\n  File \"/usr/lib/python3.10/unittest/case.py\", line 591, in run\n    self._callTestMethod(testMethod)\n  File \"/home/tuhe/Documents/unitgrade/src/unitgrade/framework.py\", line 534, in _callTestMethod\n    res = testMethod()\n  File \"/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor/cs108/report_devel.py\", line 67, in test_bad\n    self.assertEqual(1, d['x1'])\nAssertionError: 1 != 100\n\u001b[37mDashboard> Evaluation completed."]], "wz_stacktrace": "<div class=\"traceback\">\n  <h3>Traceback <em>(most recent call last)</em>:</h3>\n  <ul><li><div class=\"frame\" id=\"frame-140582372419264\">\n  <h4>File <cite 
class=\"filename\">\"/usr/lib/python3.10/unittest/case.py\"</cite>,\n      line <em class=\"line\">59</em>,\n      in <code class=\"function\">testPartExecutor</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">    </span>@contextlib.contextmanager</pre>\n<pre class=\"line before\"><span class=\"ws\">    </span>def testPartExecutor(self, test_case, isTest=False):</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>old_success = self.success</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self.success = True</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>try:</pre>\n<pre class=\"line current\"><span class=\"ws\">            </span>yield</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except KeyboardInterrupt:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>raise</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>except SkipTest as e:</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.success = False</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.skipped.append((test_case, str(e)))</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597696\">\n  <h4>File <cite class=\"filename\">\"/usr/lib/python3.10/unittest/case.py\"</cite>,\n      line <em class=\"line\">591</em>,\n      in <code class=\"function\">run</code></h4>\n  <div class=\"source library\"><pre class=\"line before\"><span class=\"ws\">                </span>with outcome.testPartExecutor(self):</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>self._callSetUp()</pre>\n<pre class=\"line before\"><span class=\"ws\">                </span>if outcome.success:</pre>\n<pre class=\"line before\"><span class=\"ws\">                    </span>outcome.expecting_failure = expecting_failure</pre>\n<pre class=\"line before\"><span class=\"ws\">                    
</span>with outcome.testPartExecutor(self, isTest=True):</pre>\n<pre class=\"line current\"><span class=\"ws\">                        </span>self._callTestMethod(testMethod)</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>outcome.expecting_failure = False</pre>\n<pre class=\"line after\"><span class=\"ws\">                    </span>with outcome.testPartExecutor(self):</pre>\n<pre class=\"line after\"><span class=\"ws\">                        </span>self._callTearDown()</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">                </span>self.doCleanups()</pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597808\">\n  <h4>File <cite class=\"filename\">\"/home/tuhe/Documents/unitgrade/src/unitgrade/framework.py\"</cite>,\n      line <em class=\"line\">534</em>,\n      in <code class=\"function\">_callTestMethod</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span>self._ensure_cache_exists()  # Make sure cache is there.</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>if self._testMethodDoc is not None:</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>self._cache_put((self.cache_id(), &#39;title&#39;), self.shortDescriptionStandard())</pre>\n<pre class=\"line before\"><span class=\"ws\"></span> </pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>self._cache2[(self.cache_id(), &#39;assert&#39;)] = {}</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>res = testMethod()</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>elapsed = time.time() - t</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._get_outcome()[ (self.cache_id(), &#34;return&#34;) ] = res</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>self._cache_put((self.cache_id(), &#34;time&#34;), elapsed)</pre>\n<pre 
class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre></div>\n</div>\n\n<li><div class=\"frame\" id=\"frame-140582372597920\">\n  <h4>File <cite class=\"filename\">\"/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor/cs108/report_devel.py\"</cite>,\n      line <em class=\"line\">67</em>,\n      in <code class=\"function\">test_bad</code></h4>\n  <div class=\"source \"><pre class=\"line before\"><span class=\"ws\">        </span># for i in range(10):</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>from tqdm import tqdm</pre>\n<pre class=\"line before\"><span class=\"ws\">        </span>for i in tqdm(range(100)):</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span># print(&#34;The current number is&#34;, i)</pre>\n<pre class=\"line before\"><span class=\"ws\">            </span>time.sleep(.01)</pre>\n<pre class=\"line current\"><span class=\"ws\">        </span>self.assertEqual(1, d[&#39;x1&#39;])</pre>\n<pre class=\"line after\"><span class=\"ws\">        </span>for b in range(10):</pre>\n<pre class=\"line after\"><span class=\"ws\">            </span>self.assertEqualC(add(3, b))</pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\"></span> </pre>\n<pre class=\"line after\"><span class=\"ws\">    </span>def test_weights(self):</pre></div>\n</div>\n</ul>\n  <blockquote>AssertionError: 1 != 100\n</blockquote>\n</div>\n"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_bad.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json
index 78741a0bca6e3880db82497c26b49c6a251ccaa7..6b4397cede5c24c76c1eba7d9f532ffa9b548b76 100644
--- a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json
@@ -1 +1 @@
-{"coverage_files_changed": null, "run_id": 613968, "state": "pass", "stdout": [[0, "oh hello sexy.  _testMethodName\noh hello sexy.  test_weights\noh hello sexy.  __class__\noh hello sexy.  __unittest_expecting_failure__\nhi there  __unittest_expecting_failure__\noh hello sexy.  _callSetUp\noh hello sexy.  _with_coverage\noh hello sexy.  setUp\noh hello sexy.  _callTestMethod\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _testMethodDoc\noh hello sexy.  _cache_put\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  shortDescriptionStandard\noh hello sexy.  _testMethodDoc\noh hello sexy.  _testMethodName\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _cache2\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  assertEqual\noh hello sexy.  _getAssertEqualityFunc\noh hello sexy.  _type_equality_funcs\noh hello sexy.  _baseAssertEqual\noh hello sexy.  _get_outcome\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  _cache_put\noh hello sexy.  cache_id\noh hello sexy.  __class__\noh hello sexy.  _testMethodName\noh hello sexy.  _ensure_cache_exists\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  _callTearDown\noh hello sexy.  tearDown\noh hello sexy.  _with_coverage\noh hello sexy.  doCleanups\noh hello sexy.  _outcome\noh hello sexy.  _cleanups\noh hello sexy.  _feedErrorsToResult\noh hello sexy.  __class__\noh hello sexy.  __class__\noh hello sexy.  
__class__\noh hello sexy.  _error_fed_during_run\n"]]}
\ No newline at end of file
+{"state": "pass", "run_id": 766225, "coverage_files_changed": null, "stdout": [[0, "Dashboard> Evaluation completed."]]}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy-test_weights.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl
index 9dfd8b4c668a3122831f4ba8f5a16227110cb4fa..a8b4258ab07e08e10c14cb8ff117771a4e3d33e0 100644
Binary files a/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl and b/devel/example_devel/instructor/cs108/unitgrade_data/Numpy.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db
new file mode 100644
index 0000000000000000000000000000000000000000..eba28aab5e607cfee36521a00079738cc07361f5
Binary files /dev/null and b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm
new file mode 100644
index 0000000000000000000000000000000000000000..43bd309ec02e932fe394e3429744ffee7e9391ef
Binary files /dev/null and b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-shm differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal
new file mode 100644
index 0000000000000000000000000000000000000000..8d0e796d83831dcce78b83a22f959c52a9520a52
Binary files /dev/null and b/devel/example_devel/instructor/cs108/unitgrade_data/cache.db-wal differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..0889adfe5ecf9f47930b690d68842f07548476b6
Binary files /dev/null and b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl differ
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl.lock b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.artifacts.pkl.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json
index 6fade8d417fee2bb12951531ebd0d7354f278d43..098b70e81bd659d0cdec8bb46e200f794eb718d0 100644
--- a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json
+++ b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json
@@ -1 +1 @@
-{"encoding_scheme": " from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;", "questions": "/Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4AHyAVFdAEABDnx/coDphHtjJz/Hf6BMJ8jsKcX6D2GlIpCTdQefBtKe4zVRUqE7IM4RD5R3T2C+GesRy0Q5CgrFjodW6xsauaOPVgMwAO1n3axAyU1UhpvS1V4sPs0g7xaNtsCv8oRoe1AhKnl3IFDf6Gg6nXO36ces1MgE7xDz9CSsQ5T2chCmCFLNziwvyXiZKmi6MvcRQ49bpAWpgL4hLMkYc3stfxkRNFCND+MKghupeHwxC4fWNFnP648dKpkQg5xXbkFyD+544w0PH+PJ5pebdXG1+e6LAMSZhOnTHNgUV/SOoiYRLohCowLRTz82ihjKzZH+EqvquWg5r0Yx3Ja1gRz3xz+q4ucPm5sFnELtxqjQdRQYpfjlaDlfNe0GiwzrpgOXv1Vdggdv/bafsYvcXpOkHIRXexotRNAJX9b9f1h2y/P3pMnYlmmzbQXfJYsgvXoAAAAAyhvQuqp9FQ4AAe0C8wMAAPGI2oWxxGf7AgAAAAAEWVo=", "root_dir": "/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor", "relative_path": "cs108/report_devel.py", "modules": ["cs108", "report_devel"], "token_stub": "cs108/Report2_handin"}
\ No newline at end of file
+{"encoding_scheme": " from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;", "questions": "/Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4ALJAW9dAEABDnncV35phHsyxOZ/WAdcsRcnyJl1OO/vI8mjmhFI6lWS7SfFyihoIfXWjVmlOSPIYt5RtCJvS/3j4bxa5pi+3PPpcnS2VzmHCG1Ro9va9QyFawpcqgSSGVWVFndTK1xzGnFnOEsQAgiJ0VB9ATsnpaY1K5Z1aravch16BLCzLWocn3K1egojbjfRrL6HkB7XP21nDmqPeoHjVZNY6QM2BV9RrnccWViu+u9PVaH/q5YyjX36FQhwsGiMGmIM/LaZzWCyCJt7bbYjq1UXgqmMRrvYwHAXCeoFH1McQxAWLW4P2GU1rZqLMKc/OoQjEvMZdHxWkQBqE2wS4++OffV1YnQ7I0xOljxcxIxhVlPVxUFyj/D04h0CF/ekMP1FxoZsff7QPOLT6apxSDa9jtn1P+u4E9eo30YIdFqZt6hmAmAOyVtNVJxeV+gOFhQOLe8suClVF+MoeXta30NnnqCdXgsjq0/69kvgmi1jackAAPVvsIGknWNrAAGLA8oFAACfhrnvscRn+wIAAAAABFla", "root_dir": "/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor", "relative_path": "cs108/report_devel.py", "modules": ["cs108", "report_devel"], "token_stub": "cs108/Report2_handin"}
\ No newline at end of file
diff --git a/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json.lock b/devel/example_devel/instructor/cs108/unitgrade_data/main_config_report_devel.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/db.pkl b/devel/example_devel/students/cs108/db.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..4d91d3931e3377cfb0a6de8c39b187956cec0252
Binary files /dev/null and b/devel/example_devel/students/cs108/db.pkl differ
diff --git a/devel/example_devel/students/cs108/homework1.py b/devel/example_devel/students/cs108/homework1.py
index 4979a152a6fc31632fa91ec4d4bb8d4490fdd7ab..172a445a552511bf84f9b017989bb1ff13683da4 100644
--- a/devel/example_devel/students/cs108/homework1.py
+++ b/devel/example_devel/students/cs108/homework1.py
@@ -1,7 +1,6 @@
 import numpy as np
 
 
-
 def reverse_list(mylist): 
     """
     Given a list 'mylist' returns a list consisting of the same elements in reverse order. E.g.
@@ -21,9 +20,8 @@ def add(a,b):
     raise NotImplementedError("Implement function body")
     return a+b
 
-
 def foo(): 
-    """ Comment. """
+    """ Comment.   """
     # TODO: 1 lines missing.
     raise NotImplementedError("Implement function body")
 
@@ -41,7 +39,6 @@ def linear_predict(X, w):
     raise NotImplementedError("Implement function body")
     return y
 
-
 if __name__ == "__main__":
     print(f"Your result of 2 + 2 = {add(2,2)}")
     print(f"Reversing a small list", reverse_list([2,3,5,7]))
diff --git a/devel/example_devel/students/cs108/report_devel.py b/devel/example_devel/students/cs108/report_devel.py
index 79c4ba414ff0ef6386e844b0e8df8ad5c02de262..b80c79585d4ecc5b169b122d4e4d300ea9cebf68 100644
--- a/devel/example_devel/students/cs108/report_devel.py
+++ b/devel/example_devel/students/cs108/report_devel.py
@@ -2,32 +2,111 @@ from unitgrade.framework import Report
 from unitgrade.evaluate import evaluate_report_student
 from cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo
 from unitgrade import UTestCase, cache
+from unitgrade.framework import classmethod_dashboard
 import time
 import numpy as np
+import pickle
+import os
+# from unitgrade.framework import dash
+
+def mk_bad():
+    with open(os.path.dirname(__file__)+"/db.pkl", 'wb') as f:
+        d = {'x1': 100, 'x2': 300}
+        pickle.dump(d, f)
+
+def mk_ok():
+    with open(os.path.dirname(__file__)+"/db.pkl", 'wb') as f:
+        d = {'x1': 1, 'x2': 2}
+        pickle.dump(d, f)
 
 class Numpy(UTestCase):
+    z = 234
+
+    # def __getattr__(self, item):
+    #     print("hi there ", item)
+    #     return super().__getattr__(item)
+    #
+    # def __getattribute__(self, item):
+    #     print("oh hello sexy. ", item)
+    #     return super().__getattribute__(item)
+
+    @classmethod_dashboard
+    def setUpClass(cls) -> None:
+        print("Dum di dai, I am running some setup code here.")
+        for i in range(10):
+            print("Hello world", i)
+        print("Set up.") # must be handled seperately.
+        # assert False
+
+    # @cache
+    # def make_primes(self, n):
+    #     return primes(n)
+
+    def test_bad(self):
+        """
+        Hints:
+            * Remember to properly de-indent your code.
+            * Do more stuff which works.
+        """
+        # raise Exception("This ended poorly")
+        # print("Here we go")
+        # return
+        # self.assertEqual(1, 1)
+        with open(os.path.dirname(__file__)+"/db.pkl", 'rb') as f:
+            d = pickle.load(f)
+        # print(d)
+        # assert False
+        # for i in range(10):
+        from tqdm import tqdm
+        for i in tqdm(range(100)):
+            # print("The current number is", i)
+            time.sleep(.01)
+        self.assertEqual(1, d['x1'])
+        for b in range(10):
+            self.assertEqualC(add(3, b))
+
+
     def test_weights(self):
         """
             Hints:
             * Try harder!
+            * Check the chapter on linear regression.
         """
         n = 3
         m = 2
         np.random.seed(5)
-        X = np.random.randn(n, m)
-        y = np.random.randn(n)
+        # from numpy import asdfaskdfj
+        # X = np.random.randn(n, m)
+        # y = np.random.randn(n)
         foo()
-        self.assertL2(linear_regression_weights(X, y), msg="the message")
+        # assert 2 == 3
+        # raise Exception("Bad exit")
+        # self.assertEqual(2, np.random.randint(1000))
+        # self.assertEqual(2, np.random.randint(1000))
+        # self.assertL2(linear_regression_weights(X, y), msg="the message")
+        self.assertEqual(1, 1)
+        # self.assertEqual(1,2)
         return "THE RESULT OF THE TEST"
 
 
+class AnotherTest(UTestCase):
+    def test_more(self):
+        self.assertEqual(2,2)
+
+    def test_even_more(self):
+        self.assertEqual(2,2)
+
 import cs108
 class Report2(Report):
     title = "CS 101 Report 2"
     questions = [
-        (Numpy, 10),
+        (Numpy, 10), (AnotherTest, 20)
         ]
     pack_imports = [cs108]
 
 if __name__ == "__main__":
-    evaluate_report_student(Report2())
+    # import texttestrunner
+    import unittest
+    unittest.main()
+
+    # evaluate_report_student(Report2())
diff --git a/devel/example_devel/students/cs108/report_devel_grade.py b/devel/example_devel/students/cs108/report_devel_grade.py
index 25b3ba567c2e51876c8551ab635f7f1497c3cd7d..587517f9c9590740b26659cbecaa6ea18291e775 100644
--- a/devel/example_devel/students/cs108/report_devel_grade.py
+++ b/devel/example_devel/students/cs108/report_devel_grade.py
@@ -1,4 +1,497 @@
 # cs108/report_devel.py
-''' WARNING: Modifying, decompiling or otherwise tampering with this script, it's data or the resulting .token file will be investigated as a cheating attempt. '''
-import bz2, base64
-exec(bz2.decompress(base64.b64decode('QlpoOTFBWSZTWd5U7ucAXYp/gH72xFZ7/////+///v////5gbn773qe976l828V9KIW3WdkIQFKr0Nowkpe93CLWpVQQHewMVqu3vtvG16gD5rCokmmqpS60UqlPsbte+76r1t877Ge30fTwHXQA076777fGxvSVwVeIXuy96ne6go+q311r6t7X1r7c3nbu7H0Lre7OpnudHWmX0Xvc9e5fbqFe97xL3su75O+72DQAGnvfcHPTndw97dt9832Ps5T0+Hvce+576+zm27NV1afb768b73fX096xp6vu77721zuD7n17sXy3wg7x22n3l2RaTBPcB0+t77uldbNe9zXivjPu6D5Xns+57rS9DUXfDpth91n1fe+t3Od12yzdg7j770+r5MTtq3vndX32ePvu6vtj3mHS93V0rQ7r55588JTSBDQBAQ0Jk1MARoTEwphJ5qjyh6TI09NE9TRoep6T1BKaBCCCEDQmp6Aamim0Zqmm9RNojEek0PU09QYQAAAkEkII1E8mip+qfkFPTUyepp7UaQ0DNQPSAD1AyAA0ABJpJCTTQEp5pU/EptT2pmppPSPSeRBoGQ0aNqBpoBo0AaaAiSIQAmmJkJkaAjRqZqm00NTSe1R+inmiGp5E9T1AD1AepphJqIgk01METamnkmU/U1PU9Ro8iJ6n6U9QAaAGg/VNAaAaaB1gf2kBD5PbSgISCp7YkgHzxQD/aQgAT1sgoQFkAJZJ6VFiqi/Cv/vj+Gk1GGOP8f+Von/oyI/pec+jPpw2Px/3T8I8IJxcf9Y3na5g7f84JFK5cJ2loTJCZN3/s1xP+9WPyhH14m/XNLtp5K0fdi3pDGRor2vhTjI8uGS7KccbSPDziogWJT7fsyRzeoGW7uhLSJ9EYnfu7NKhSi16EahzlGyHIZOXoqjHPrg/9n1kZQ+r/1XFAl/nxfPWWvCUIT/7ylOUXdLNfeqxt7bGCRV+vqNnUAiif+ssceBgqKqnmQUPmYjFBSCwUESKSRZFIAfUQKGKLAEGK/eSYBAkz/VhKQCRgkJIpAkM5YPI0tkgNcPNW68mviTMcfavLovmO/J1E9adZPVUCygqwSCrLI0VUFFP22FgxFQFUgqDIiqkFuqJH/bs/65bvPDo34Q/g8gvP0Hrv87rrqzejamRxYwdRHjvGwZ2le75ltDvNyW501rg5+jy3zq9+C6zCxONkI94sZcibskqvgzHdmJ5AyaPf1mMEYjru8U5va4OdL7r4ZrnrreJSKWhb3YuxStylB69L3quo/1Oo2NBSY5mf6FutYWsGfz3w/bX+n6aS5a+6G/UceG/+UWtwvkHFs1Nv1PgkU/9yZpfjlCc+ViNta3xgmPej2J+PwR09jr+bt2oqDNW0tOR64Hvp1wPjBOHvpZG2Xqs3+xd/Gd8RFXmcAmwlM93rs45xrtf/nNdjK/Bw5cN3B7qrWUHuvbu4CItUIh6fITQwPdI51e33lXBwSGQEE8OfH6k31yI/6VF1xJn73g2gwQ6UqniXjZZmftz+bhtDqK6HOLQ+jw80chNBpv3KPrgzxZ/V3dbNBl4fLbp4euhCv87/H2R2h5yEnTtTg6V7CdQhpZFtvg9q4WmtiLc3PCMRxiDkKdw9SZrvtBavp4FQLBWO1MY24L61kj6F7TB9PZztMdq567E+d2kCV7kow0xf/TrIWYytlEzEuG++mV7w4WRlTnH24SnOx7MGncPXflHdSMSawv3qJpLYpplru7P5Z1NjCUc1BWIhOG3N4GM+U7Z7SLrNPKLrzTfn4aMyq+FqM+FWQuYqJ9c+BI39U+v1llI7mn9ulXlY99ILvkzijp0289zHR98yxzs2/Ew3p2iFpI0/GYjqhcPGDVkzJhcSnJpnqvB5h9l18X4lspQnd8K40ukwR4/Ft60my/Ncpij0oj3vJlB3XRMo+Aro7gXIIeJ5fV4nnm2hPZChNaWk
YBE9o8khFmAaeBRR0wOUku95MMv52Ww0ft+vXv3M2jO+m5WjmwoXzYx+bIDmWQhkJ32bkvpdAhXt4y4FxuZjgI2VoygaVIu5zmx+87cRRiZGXcyD/2PMB9vOm7S1G0kQkQba/trHDkbGDkPz3ZZSCqIj+JOp7Ssb9M5Sbe7PR09lVuStc78xSLvVRcarr6dDBA97D39pXm2hBxAHqTYpsUDZfGu2ToRsTNkMac+MHxXexKDOBpdOF01LBpncNIId2pN9gPfhD4/LcY5cdQGYxivgguG1OFiGnz3vqzaxpnHMCdwRDlQjfGp/FpMePFW2lQucfroED6aO3rIcLIm1nmkRE38wWkC9ixrx9G0JCSk8t1YSiz/68HgUOYmYRMR+BAMKsCGZzBm3x+qMdScjllbVPxbWYyMCuNirCGUz5boG1C+8qJPgJrWqO6l37XQxZqW5D2vl+S6+PUsI8YcFb7qQeIw0pzLm6aEzkm3ftvHO2d52chXu2xRudyuJ65J6ag/Lr83PF3paOomzCY2HNbv6+revtBK7aOvFcaTbQPN20Bx16s+Ioo11qE+070WLbeYZDuj39DJlVyjtsp4b1k4jrr8KRTIjjWDUN6c24vYYBHE5WvPbWyUTCBaoij/atLReipddZcQHKFH4XwvjPTgBVNNtOeF5kmjK4RsV8nKECPLAh/lcYoVI7Rhthw0eIcsM4m7XKUbdz2wcsvj573b+pfIXfwxjhtsKgI56tpeqwzYLtdDRT4tL4Z/9VAgrfQO3f9QnEi4tG0M8sGbaSyCBjYxEgHQkxrALpbG9xN2l5ZDUhhbC9svlTPG4ssZ2gcXcEzWlrMGEJYk6E7S7HHHWRTzay20LmqLssCZaFX0uCn1ilPRhFmCjRQHaTchpF1d9j85HChVxNa5uowdXbLaHbm+VpY9HMjib3U4DtBy5lgcdj6DbfCQKc1aXSaMOBhlwzgfm0DCtjJXTHH7iZHHgP4jjLKrNxOvRd8CQkLXK6DbofVtYvYZx5cmyMpWj3bi6M+bwwJkY46YFhUzPesC+qSGZCSDlNee6fQ9ksi8Gn4v0tpBjb1eJ799G3njKM/PLMz6Pu2qFuiuZSPhFi9GL69nHjwgyQiLr5xYmpmlpQiyGQ78DM1SWU42g9Hr1m5z6B6lSk5WRtCSKPtlQ/DKlmsvGpE0XrgSTc6WTaT44PpKTFxvH3W98DjZia4l/NqOy2IV90SRPLvJ/dGJPq0u7ooyMySNpB9jVJn4dNMcxzMMhfAyMLMCjTTLGyJdpp95tAuHlMU5OtoE+fdHG2l9xB6IXWQCTdYRHUpqg4s8aZ2N7DqdN99Y2UzMTNEwffuaj9Ab8J8eMsmgdcJDw0bp6jXX9a227+Xfv5XONodHcOvGUFglBtRR5MOgpseNFT9Gj01Kw9w5nKUt0MoPxcVWJoxq2NF4JPR9F4yxcN6tKNpMzH01AV93y3ynd+fx3ys6I0m2TDqK/tt0SBrFzgLeisfWbNXsJEovqWuaI85Bn29zw9VmhpWRteZzzc9rxsps+dY9Y5iZiImD9pjXKyUaZNnSbP9MkyJkZDYXxeGxzuapTWFIwztMcuqBd6+e3ZckYWGQPzWOpwNxCLdTJmyjlKBa7dVzsxSi+JR/yXvjQpXicRFpTVcRKnLyqDzxX6OpnN+bwXiVM+Hfo4Rz0yYNHTCz7KOow3Kdjyno80WOTJ/fJwXZZAO5bSjEV8ZRIjiEIHTMtyebTuCmORxLHN6bVdED2yG2bSWwGi65a4ZY9HwhvKk9cNsGaOUjo/WUyhaswXNQExBDu0yd1XbL5I77+3idx2Zk0nqDA+3qdpW9vpPRmgm+RscUlamcwioodIUWCBAIyhpulvIcCW0q77ytsSQUDO58B2DClxS4uIraHOVoUXzO4iGUl+TmJ0UQx5g/uI/qWE03GD0dx3mg23Y+wKKKKj67JRgiRtkqKYEwwsQWKNNRb7Pm6Nt14dnXYa6BorVgvIJIbo4XB
AJbzAddut0bGbXAxImgKiuasm02WHSHDU9TscGQhCzsXu2y2IFiOSq4Y/MU2K1rhqhm75ock540ucw2yT2l6tMT4rJm6ZnNA7M7jeKCr8nHNjfRAV2UEW3iQFmi4Iu93brsodiiz1JJmqzeh8nazWw237hzqnefVkfMggWjiwwlLdIbKbsO/Vdng7oqK3XrNTKxhJ2RuteOgbQe3NtdxpkU0czawo0aKOrpKRJ62NpnkS2vmQu5wbLdUNguyuKZRIsG5HwMPQUzK2eny8m55SEnKRzGB4ZlLqfQ5LfJ6tpcF6nbhOoTCJsY1xgcLZ33wVy5orMErBC7sZdXh9mhpJEUQ7x49tsJ0HkvPD6Jjt5+Mbdsc6HNOxsY45BzVBFBWIEmyoouSi+D8il0Mat9Bbaicac9Zy9m02Y2e0s2drLR5O7GycXh5499byaTp0rvtp1nOo06WEDfhfGM8btDJI2tEFkYl6kyZhBbobXQHdShLAk/uVbWu4wCN+0h4rm7SQVCDv4T0MItNbyskun5FbcRGtwyI3yfY5e72CuD6PPvOU3DrzDqasNOutooW8FNEcLWULdYWk4tvOcROB9Ao5fts1e26w7CByjOJNmUNfXnfyrnimEeEoY2BLWjfPS7GPujbYaQIG6OrUcTIaO/jZkTed+XjOCUMOxzBiEbd2m7C25Nqnbyd+yt/R2zptYuSeHeX1p26RQY+bFlIycsZMno6h/D7jwIdGPaiQd8bDpB2Npg80ZCmJd/B8GS4RfkeO61iPRF64jWzcHVrc3z4d5xedfNTZainOTB0U03PFkzwyaWXiI4HO774ZX2fcnR4c9s44GVvEHOrPEca6+xUoSG3numXcXoguOUIILS0olP7c48DttDFt7nim2o7Kvqg5yxpv1tRbSCpbWRStZ8sIevas1Exd+pSX53aMH2gmgtTHVjVzJvCRTV5B1Rrd0aUDFPS4wLIktxvKBIQgIBNnKuKtNwoNayPgcfZgehyR1mo9vtMWMYsBu5x++x57nfzA3o+uJwt8cbN3Z22a78oQmjuXqPZoOwIcqOQUFyEdAR9aCQj3OOTaxiw+VzneI89Mb7NCLXRN6OsjVmQ6/X+wPaec2Dv2Jubi/fLj47loxWg2LT22zmEIRKMY0mxZ3WHZdCBdt5v29fyT62fRtXek6KG93hBU4xOyzieko6wTxLtD7B7bJ4fwdUlVvyXdIezNieKwhppGWEtttoYI7VZIrJQ/NtdZKSr2/Gfy18MK3Y15eWEI0rnQ3lhInI4XW7nttstVmPtptdbjOs+E3lXC+iZCvWU8oFvkyPgDg420R270YBeFas2Ejsh5hS4Dr47oxcXojLzRuk914Y6kSAcv0S89bfprDq6/RjUkp+3HCXXTZQ+XWg45pa8p8b+ksMuL7YUTNJ3vezqa6u9QEoUoZcfD0KMWQu0TQMOrL3G2nv18C7hKyIgKFahyGsAyMFCo9VWDbIsPj+iyZr+IyXODg0QtswisVLZnPuaeKHWXvfghoLfm/fT1qG3CE/qz4a6e57r0YK1cv41/x0Mezt7Z3VPIwLO970MymDgKPpLYIqs04WNOaIFY0zGsEq4QHWRzBUZ2P60/qQvuCxVh7bW8kLiGq7SzIypPwhDTHtj0wbK9ms1Fjf4db4LW8lmOvLGM3VirGOz9nlc0KW8qV3HQ22YcBvKp4ink+IiEj77wE3cPAbSPUIxeh/nrMbKLpHDu+fi0t8mQtuibY5rNsJeLtJhuPKTeaLm7LDhvq4+DNBU0YLhp8wX8Tk54Uf45WidgHgUf6T2f63EsYFJ+YxKTy+Tx/R/gB46vaTD5Pdmwke8wJo7VC3GNgiXn+kxzBB13Plz3HI6/wfNuiKqqrEDoQhw4b7pTzaaKidQyjIsVVWQ24sA4Q442xDO7wcwNCz4lRSCh7oyV5qlZjbSo2yrbGIVYySqyVlWIBUFCv/1phmYgbDrwe7mWuIOIjqVNHMLURk+X9oNzEBhlIZe
hkyCEwwBFYpEZJgQ75E8UMZt5hwxhGsc4hdichpGmp3c1UQV+/Hf0w/N2908T/3remUVB/DLwxh0O99zMEulNQkhGws8mQTaG1k5JnRBriSxD03dlj95MffLwqdPBqHzy6qVMxWz8K5fa8QWYfGFag1guLpz9OHFnxhkqnOCWUoi1VOtqvElS8vGIdJYp36e5ij95ixeRpfMjTWfu8/bojd6xjRxyOIoCmQ1pIv855os42sizB8QiW3EHffBoyPhqsd3uioxHgo8Dor2jaKcSuD0lgiTxmb3QyapgZyHMIzSG9BzKxKWHDcxKypDAups5HcLEO8yG0O7ttLp8mb0PWfPzuCTNNA6NGm6IeLaDmQkzLJxjdZp5EJP5QNShAUdsBoKNMyNQ4h+rZdsv+lrHoemLpEZ06ynyQmXb3mU9EsFuayoTY1NOo0LS+wVheQKEf0iLwqyF46uy8xYSscDKxqqRFhCGmDxcft7BcHAh2Kibw72Xlccm2ScI9q9pGwSKCMjrLOLVqWO6T42waKI8FCCi5VVgm5NsvchzLn9HHX3TxZvG8OQnNRRUxvLPTd10gtMX1rNNj0Jve7ppxUiHziJQT89OfHgTtUK/VMdddcaLab3VFA5GrjjBdckD5LoWOtQIg9RBXqF56bXldNI6bT6PkOSyOj6wfp/X92ZO/OOvSi7d/+Kco5ZR4bJ90XI50hV34rzeTlHw7bu6fL9c4CzXbB/TL9kpla9a7K1iPnskopOvg90KYUL+l/s1Od/344e02h0i6YXzkppB+nF+K64d8Fm9Y/Fk++6nShYqd110HkWMcCD7lEshNwq85ycl2Oc03FbQeiw689xjhpveDpWIdME0fEWKyda8w8I3EvBeC4F8rx+v38WHMT0ZeufPPg2C/FEPO5Jov0zmH6PB8X+bl79kQ6IW8vx3UGlPRGTMQFSuN5fq9L5LOphIVPO0q7N34YZ52WtYq5nSJoGBShFGqSYgk47OECPojnl2UjYzp2ccxtgdCRCMNpUsejBqwg1N28g23dyAg1GE0U7HO3o0deKFZeXeEqR82kvpxJ9Kf1uvDvGuKg2Pqe+G1vPCMP5vR0euJzh6p56fRMi51Oscmu1zbUb5OdNDqLYDbJGahmb0U8EWQO5EbmZrhLtGjanKleza4wsPMRioUO0e1VZXlnpqW6of5uC9bcMyCDoewjjnKYtG36vZit73RNMueMXRGNXekn/jC442TXVF0vB7KRSVHOMjTCVJXPvzBNdttHuz/TWrZXnULSPmp+B/POMuOa9l+CoklH1OQRi9j3RpI3zcnW+Dd2t3GLdV7bl6OFxBIyoP9nXi/Gp8k6zMHl82xBz56k0qFNRtzLpb3TxD957lAxe6bulbAsXdK+yWqjct6j5nkuHo745YY9LCUMM95grKxxLlTFZqZh/niZXSpXd9976cR9k2ejTsDir7PZFcq7K5n0njm1tDxl7uIG6TXeT5lCv0v44UO6ICdem9+9+y+kL1Pcr5y5ay5WPJOpp+q4fF8hFYzeM7a92MY96hKMqFNvNIhO3hmSKPV78T83OMb9XvO0Vquw8P793MoFV+rbzsnZFpRDukJBG/u90Ez08W4bSuwD646QZiJLU+kiAez12UYdBwEEmaxURHEApOfzXJDkybDBZqIRGB28Zd8dfNSE0vKESAU4S55+3owXMNxiI/XeNVEROgNva6Zkzee/hM4Q16+jPjckWtuffPt4HyLGCCHM2Or5BNJDeZV4s6sVwmykuOF/BVX6ONc1h/J4Z5m7k3pRNPrbexsoPPHDr4K66XBXW+2DSm8hr6w8UQlrQCMdaE47wl67gnpiYkuFlPmtsyk+DF64YuYWULKcD29szjmVEMtyWKU193c73fpd0IWk6ddEQaePd3YpEmg5Er4W8yOsIqIOOwpb+zdfhi9BD2QOd+PZEvuslOQcE/S+HrjPDCeF1hO/3OrThanHxypS+PKbbgRvp+cjsZqzkJ6cw9bPR
Mngaaor0c8yT54DDjhNLObfK9GGi6CzhsF5LLOhQqsbnI5wINOKUQ5HVCvFEEsgdMmG2NsQsuI7ezPgvb7sG+R+EODXxawjfI6WSn7crMFcQX/PSh2thcVo4+7iYxlbbu83WqbSu67br85d3P6R0FLfB9YVqO3RFS/Ss5G27TBX8Wb3ddvPbfp0IyZ0SW5XBnwlpQSP5R1+L+jTB15y0rLyAXnKy0iJ+0m0W4c0d1YlqNKOFyGgi5uzKzcSeye7Ttm15rug3HTyY8kAfvxICY9EXLxxAz/WyfiunHEPQRYSOo5GD+TSXeVGG/P8hzM4HPs0aDKPfY3gVIZEcbns/Qr9r0pieSRxxO+/Mz9BF/ZteM5Qqd11KduR5IJUfhBvP3U6D2Nx0jHEHRSlNfbMsoOIyNMIf4xx4twWfWfjHRw5aYcuMT4HF+E2gY/I3r9AkO58PgGB6TnuyDQRsIG2RLI2E1qdk6ENcVkscrXkq+tx29lfHnX11ju2NzCdYgu+KutPnNGEZ1ica+GefuvW7c8Y43458KlfEpUE9jRFFVSFHE1R2Jz83CTWY+4aPs7GBiWYRjOW4uT2NZFwEpWyuMcsFHnyY4jYYwNmdC2gmJMSGHFsQ43UHa0zW7gzaKLUCaRyD6OrYMAEdhsn3hAosBmE4Jdf4hna6DkUY6YrIjwmkxx0jhNscSQxBiST9n0/R1M8jN+sHbgD626jnnxguOIUNUQYGwnRknIycj8cLmrLQ5yKQJo/vVImby9oqIQv15DHBkr0m8EM3oXZiQ5YNT1zxpJkEcleY4viwfTiXEN+EXiU8RlEtAvcmZzzJD3HIPkLa8OkCEg+c0aD3XYx3NG91hpVTBDq04yBCWNRuSGEJE93VpHbBpQGDq3KN1whSU/KaSpD4HZNOyHmTarF1loc2eytIX9kAlFLxuz9crlNkmBUCfzudpzHvZCCxzs4qFqIxc6dV8C9C30ACcZe4LbOFdq7kcvZ7zL53MannhAkIWC4PcJrPUxcMih16u9AMhnE3wcufGM/I9UzQnOPEzGlSNdQRI1q05gmQKIjAQyEa9TnvxN3aB36G47MwnifMMXh0OzQBQBwfe07iYQDe40Ae7rayJ78flC2wsCB4Bq0A/OFu8Lm+huC/efZxXKR/wXdZ6h6gdNda+9iY6oxDYQgTIz0/Q/i30fh2xE3bHZjaqtvDDMYQylzt/XjlBc0bJiIkkqYTamgesKKuzfvhubQT7SZ9pQGj8pUyi/mjIjiS+pi3y6tjvHnt9vd5WPF2hbrTtsnONh6Xpc/SipSEspW0EN+33fpc3NlF66XrnqZfHza8mLBmWqx8cXMCDuCEyQ7KKZvUjhTfMiaEdVXZs4PF2WhwKQSZVEOJDJoMwV6aj2m8Osr/TGXiz5WP7YlLncje/rf8o8Qja4c1CCHHrfbZdBUCfX5vr5Fq5N2UnWGD0d2iIsFGKsUUYaYrAo4goki1DqCBOhRTw63HR2FlSdHCdnQ8vIj1cMxMJBm7apbuna3cJ8m7GoDY17Ax0zfSZYGpkdiSXVfcHTfY2PLSqo44uoeiPRhQs5jrjM3olYqX/nrN1jWaf10sNq3xGEK3eM3ne9WSJ04ijM3iCaxjNPeImtKzAkkph2isxOJTjvmMIiUz1NVqImaV1CU6giEg1KdCxdDvOKHEkmWSI1Nu5TaHDVPcUTp7SRaFEzZGofUrOrzb1jRebxaRm7JzmrMxFxRUxRGbyyIlJTNTcv1PHyZeAbNjR15ygmhq1vQpdh0wdg0UViZIbfSt1qMQRwaxE/joa1ZdoZsGKklAAgjU8Gv5jJ34TtZQ/aDp7ff4HHU5azoYxARVBYisfFjovCKoua67mEwkk0gus8wxKIXV4cgOcqtsKX979px6vV73fr1rrjc6VFTMPLlXvwrisZuwdpe3wn5yQauauMKXslYxV2iKLeRIkuorFtMuIQqUEJrLiXV87XE2Iph8VlaU3olP1nF1EJ9ajbVbajxM
7rUBvGlspjC3V53fJu+/Dzwpg4zs0yj6JuaN4o1jjLLTXAn1mNPJGMb3ijIrdeyb5L577aarN1xnf+iP7ehlluY4fJhhq2Nx+uX6bKOZ6Z7cUobl+ZcllzqUcq1vx4tr0Z4OFVl4OG2Uk9NxIPhioVO77IhkrSIiOxMcT1JO2pF001SNydn6LMCl5N+aDZG5eoah/5UE59Zdm38dsdhUPN85b2Hb3aYBk5tMywqOdm3O9iFE2ZdTwwdEaK2M+uZM27boypTofLs++3mG7bDBbbMeQZ35Lndu2idGcGeH4EYktYP8O3G05SNIV4+JC24mSQwmaqszgxp5w4fuX1pI+L/jD8u0d/2Tkz/b7093Lq9sc3f37+peZt950XX4GXphm0eL3WAFRz3ogkdByD/dFv0H7PLyPuduzUv/JHerGxxg3+iP6xREnlm2DmrRb3s97u1tX/gP+g5faJ8Yw4IJ8Bn5IQiB2xecHTB4R2GcorwoO6Pflm779MxW9bDc70Aw2xVRFe/nS+bFphxB1qSkPUkOeFUeVtGnbW2KctZPf12nv77dEds7PnZyyAdI28sKD9KL5ThhHCXciJQ4h0nTo8bhQ7y0c7RWFTU9nZ2KFVidYcdJTz8sSXnH0zqaq93uye8dpHoxjH0qf4L/J+rV+r9q8rpUxOzDHHFumKvnKPJ3fvJ9YIvOnj0ODtOr1j+5+xahFCP7tiDWRZJVf3iywP7HRqySaA1SRZIRSSpJpFCFYKEFg42QBQkKgoYwMZERSQMGQn9qAEimXT0EMzEJgPga4D6PpSaRN2/C8X547Idr3Ic3oM1Y1ra0hMUMdTtp4Wqy/OlZGkDNmZr+f4rc7rlOLZxOfSiJPCPpUcvFJ5KciXlqwXSmiKf8i7l5wYtYfGC4VSh8GUQ8kx5bRrFQoTwo1+LnBVpV4Va8TFa0nSBRiVMAlw/E0xsLOF5idvLx2sE2A1FVR53km1u1Wlms4S7rdLJVtW3fi2QVdBwi5OuidJR0/mwct51k2yaxvBK7m57otKvL9wjMhScbBy487uylx5UMJWwzIPrUhniS4LMeYYRaab3wclTCX6J5+6Sdlz7IyWfAwMYiUXlBThvw9E0kc6CYzw91Vv02fN+4DzbnGNftwGfi/Me79H8Z9XRrDcoKOYcmClRwe2jq5Skv4WGT6TXJrm36aqbJ1hIkv78tFA7AWcDFyOY3wv6P0E+y5tD3eH3R811VIKq+g05EOfxhc9MoKJbeo5GJ/ij+B7utw+5Jb/9b/tnYHX3/fnqNHUgO0DVRI8ATAkIvbV4Pu5mhUCFAdx7DylhRRXwYhzbIu/7fx2p4fuzyP8AlM3u0QkOj/f3NCAmF0vPKKCwdp1eQSJhlGRieo3lrByAOGxlHDV/kHu/yvgZz+Gc3mkpZdNBCcg9R4lwwxQKKmgSGRRR6Vxh1+dTrDNiC+cZBJEhAIgA+5w79Il8TtwGPrPLulxIQHOv4UuMCxv6xQ3g4NoEDCCTGUChy9vEU3O/l4R1NIzjvBFViybFQR/cuEloDEPjkdbQIjsmOPj7O4Mt2mgs6t0QfBHIBEQ5smNvS7RtZJk/f5ZozQS903MiR5iZjOeGrhP9X2PtNzCQymIW5miowlhMDqfoDJAxYpronSygiFbGERmNEw+JXmaNCqPkKEhGE6Xv85SmB7m6pVVZegVyInoLFl1UV3BQUOk+Ba2fRj4GCZj8eBkfNoef3lNE7GFp92PVmlrPo34gR6u7Wt/yMSrl/qvgfLnyMX3o5tSak3M0zTY6X916ukKlD1RCx0OkEG8BCOT7SH+x68sISSTKWfmskUGJLP5JjMk/zm9nlJPYk/+ByPFWr9fVdHj1UX6kkncd4tkvFGIghZzmI9lQUcAjrySp1Cd68piPyeuTrX2HePzbN/fP9tepmblfvN/m684Ebs0RuZ+VwdRe4n/FoP97hXcT8+mkuXyZBjlo6g7aFBzQi5a/ZowH4AmBCS1VEIkA3wMmWJd9G
uzcP6f0KKo2++ODxgY52y0p118/l7Q2EwoIQaH0HvHspfA/QMBBByyQeseF5pyVDsUm2wfKTFSFYJ+f4XNUWb+2m2lUR66SbISoYl6UorAxz3n3WF60PspXt+3irPwRicAtPv9l3eam14S9047uWSrO3nkxdd4NtRHeOYexbkPGVSdEr+eCoOe9p+JpJBwi0PT97uF9Kx0cnjM1JHL3JxEUXaW2B7q9qY/Umb8dutb+MSbae96id4HTQdXgxMEcuWiNk/lcQnHBLw93EhW+tiKb0+rb83+Z9rfplVx1Tm6O9m7unTvonhZU12WxXBKnLyj3y8Z3lvlrvhnWz0JN9UxDqlj2OT7vrjBdQsI6EPwK4ezURKt+zx0RVc8KQ1w95W9/08Zih1jlH5Y4PmupRugrtLQmSEyUznE9u2PNEiDOanO09t6jo25s5dulvlPY92PCa60qza8/NqqwVQcROU/+myOKfHrxcur/XE1esMJ8VL28vlbSlkNZe1j1N3Gce97S4D7yc62R76hfZx1Lg7tBB+k6Qo2TQy33WX22tWOpdn2jiCY+Ny3WPVZ5bSzqtUfbfLKiizVvYUsK9e2jT7PvXZ6UrVIjI9rTyqZuiW9TDdmrMo313ZxcJikfW3dPm53k7Wl+fuuPFqL9NhV62vUJenG3LRp/I1PRA73TnCLoR5VRv1SDyTm5qLtemrEQVnl44HNgKaE2q2dSHGenPMfDwZ2zw12bxz6WovAzZYxdqrVlDyoogjFm7UXcccUezLJeNZ6G82ojs0PBJUEkVxhZF0t1O7W+QRd60dJMsGHbIcdMbi/0QUBRHXYJw65ZRqPoaagipzNtiksjqnIoW0OtDXCg3pScl3K4g663KLwWqPL8srM5RXS7mE36a6mWKa0LjXhhwT4t78uIZTCO0R+fy2g4xxnNWgtepc4cm/njiXEiX1Akd0zzVsVdRQbzxrBgvSsufA7N7nnIKhj4OcE3yLxUlfi9/hVTa8fKoG3x741sp28s39XWGOcvXwdqQTLsnTihjGoZR/Dbt1XNPWUvOH74C+mOHhVO7NbZJv13vSKi4peEEfxUQX874WV6vk9r7JzL3U8RXj+La1iQHZV9Ppw6bcFKlI2RLNaS6Fts6tbNokTr5ux6f3eMPGD0WFWoieNv9HC989S/BfDQVTHei+s+qbHz3WtanJzQRY7TxqoykORex36WrA496Nrzv6vq3XiMwruc9WJtnOFKv96LOXOH7bt1Y0u4ceNNPVyoRNk1yHjrHTOeJnQ6Sd59FvdeRaPlNxQnMijcwoygBaf9/ppLQFP5zL7rjZ0sUN7hCfCvf8LXq+p/NI7RKtKEgMxxqdobOJMQxCuKFS3txMIzCTLD1wkOaPjT60pC19/W0FggKhATFERkrV9TscYcSHsPuiRDAUAOFgP5m/MExhulgFBtSJ+NoSQY3VFC9MTSDSaWxFta1qoijQ6EAge5Yiq577egOxEMTqtFNOKSKSE1jgHpDmOzUnYYB85jEO0wsVBUDgwnOHvAuS0WTmPcbphiMeQ1wbRstElwAcb2AhontP86NsMrLXfVyLoZ3CXcCENgfHSJqH6XcR6hyNUdA6g1d1VoSbz5kh2pxz3NgmzAkNLCmmWNw7xycFdhCBFdS63Xx5kmIUUyEIzENOoOBgMhQofb5klnOc6docuiqKewQ1vvZNBrLPsuhxwKCGRpYWAKFaHuzlg8LkDvDxMg2BmzOSdJWlHECDAskC00CWDoajrDBN0MiIvIH6iAFI51gYHyiUGhcyaXXo5he1ueQ0ZGcLO4Y9vLcGQ1vzaQc252onTiKXt7Dp4k51qSiRNxQ86UqxeNy22cQsiZBSMuSg+UK9MZqgsOWgQ7vcuMter0TastrsZGCCsFGm0YEIQh4gbwKFxA7g4txubOaUHIyvOWw8mZtufxhA0GHfDpUohoa7zIkL1sKsPOy/WVJoIxEQrwYjhfml/ezDhtRso/JFN24otOxKNkEPLXj
aMAxDwkNJ3qfbCEBWEEA6wlm6Kw8vvkKNC+9fb67deg9JuEH6GnXdSGAsUixBBQWAbngrADSAcWIt1cCQM0ogEWHB+ozdZwgX3HUCDBFdJumLUiqsCiAgMZaVhf5dg2neCqICqosPwlhOXYpgEpSQ43JAtJ04DmgHOGMnBXIRkv0D20eX/JMFUMKgeoqRE3zDFmgUUoEwieRp0S1cVJuzLpHj4eBAZJpMiY9DzjteIkNSSWbLgWO45hzSKDd3gpDek5STzFNhjyTOpsfpKLkQKIEiSKyMIsSEKUKFA44hO2VYwYxn55ocLDcjrwVfrIEJIoe2dr1iZ8ghmS5QU6/v68COrO9UDQSA6ORHK8JoV0pT2CEsy3W710rzTWpeAushH1J4OwM9F9CGEIBseez6REE+Q6Q7g/uVIITvZWZNGKsYO8hiATv1T96W5rC7vDaqf4hBTckQiCwJbCk58CE2OOgeJYHrgwJixN0tN64Avy2eeaGsIweaNmSWJSwGFJFmIC3dZzUIxSQkJBCQAkgkYQQDXyIv9bw7A4haCr7YwsP0/qzwDFEoDxMoEUA7w55YPIlMA7qMHrNJm1yokcr9hfsLFPsJ3X8bhcsw5e19AXDMGcrOGJtssGCWRum0upLybGclAesb3oSZmAUE/hsUpzBpM8k1aJKSrFYR04Lqiu8siFZkMSZHmBY4wT1hnQtx/P0KIlfELnr2cx01FvtD9ARdih80Q9VpBqonUiWLGXM0/P8Re7Duq4PwHTfujVkGwv3xZxiFlzEjedA7FEhD+aJ8w0f/Lt6Ib/39nq+o6BH7asbWjcXaD65i+qcg9BMzzsqv4DLT6V+ZGnwE9kY6GyiDQov9LBWM6fCi/xgVCBwf07W/mfQlCPt/MlaYDmgZGIBQfq6dRY4eM/n0wU0D+EbIaZpoaSRSHiuyyKRANJ7K1DPpDvIGOcPw/m9V+Ecw0qJ7joXgQ0EwAwo2MUwDkVCPl8PaZ4ZztFEGCixYb9qoHrisGQX8o4fMwx9a+XyYSefc7d/rp1dt66HcYHj9pDxGvhLZQPzWVFyrJQRICpqwMSGMg+skaCMBnHISbcglkEhMNAGEFpCYFuw98YMqEEkKsaO/f6/2Z+BgbfVZWz4hByPgDwVYvYmNyZWFB7GW0kSZcm/ZJ+4QDfaG4iigN+IyA+zrPB1sDVnDhISXDQAIRfxckL8uoe24dQK9h6Q5lA4FZ10LBkEEAaqQsK2P5R2dYn8YGddGnWeTDdsxosolbYeZcxyfSXsJIxP2nmktY5QNnSIn0/IFzs2fgsBGxiF6I3ku0i0F5Ne5Gy6TwrESBhQVmLFEDufwuAzkXHB/KZAlEBxH57FEQTbdL4uhDeEDykLJ3h/2HGIfUp+T56zoyQ0kKQ1FZ4OhANL6wu3Pnsrf65DwoSKIjCICsEPtFEh5ZRJcJfgEPJ9QRDMsAfXseJ2i/aY1AfFMOm4OzVAlDSFNdNBikkiKEy60adGDSajMyGaYb4YGzSi6oY772FtBZLrZwEQ0hdargCZkrs+7aSxizVuzQZDPU8LvbCPDiYmm77wxY4FoNKiN2d9Sq9X6JjkgYWjtw5RcuNEd/njWYUHhRl3ycMUkFYo6mbj4/rNFBj+78/2wP8VWtB0A3pBpYFjynWUSyDkyGKRnNU6IGABg4i2IpQQdRZsvzJdeHeeMfh5W6B35nqioXCqKYq13wLQZ9Yyw49TCHTYfnpVGZlKLkQqAyCjbMBEahgOAJYRKkxyTAQpE2DsP2RKMYelx1K4OUxEIKAdPtbtEu4qjV1Wx2uquMfzM+bWtY5HKWN2EY5BDkfTd/JMSHTblHUUG5sYL02HoYtQSekbvAOpzkcOZ+oibN8WfXi/9iooLUYy2g3iCdNeugXvmO7oEjlWRg44RkgHOwPiNgcGx4iHg5Mb9ngUPuIB1TSEnMW1eakzg3DOfYEDw/ufRibzQmnurlLB5gqiQCBYjIYsWjlzsZiHgsAfH5ytyaUb
naIidfSFAtiMoonXl0faQECP4xHxoNDjGpBMzNISQ0GhGiymK+ddh184aR5w+N0nh/4VLEpTG97lxGaASkRhBpuix1fSJiytYc7zJdhvmYZ8rhtBm+5udLxhyzGIlNhEI2y40mXpJAhHDPoz6ts0lhncwRzG7M2LFwnbpHxUGxluODMECKMb4+OIWv4HIyz5bZGlxp+VwmNn4OODizhk1qduAcw2pMg6GsHVIJMY1J/Om/Pjbwk46GYuIOzHglPPPYtxSGnXXZ9+prBk1jEhhECSWEXbRiKK33ZlrztM0RMNhxirW6wyMEVJ2JCgTvplg4aDB3gUjjBtU452yKioN5fJsiWh1dnDIiSMOTu5HAbQ2UzdkFhpsmR4bEb8vqixEPmudu+2HZFFj4F3WxqCQ4QgvfZZgdh23iYLYjKO947b0RAIDoxhvEm/hY/DrjbgrszdfHEmFt0dnwzPoULTZ+ydGb3UJll6aA6wiiTw3tmuUw446pyBSTA62FI+o3ziDMu1PEFYINFKsCKZ9rmzMEFYHgLJgskoQYd6JZBJATS6lZ52vE8aaC3CmN3kadyiR01RGrpt8Z+Cyat8agglDndMzwD7JltviqwRWgd8uSiR4eGVq1STRBKopK4W8YkztxxNpUbztW/GtlqqUIgd8ojBlNBF1qcYqEJP57S2FSBGnYQhP0d95W+3SNOD0Zd7wOQPSiGyoGpPiq0EW4KCmNsELALKLsfO3GZdUmQexUtD2k1y3lG4fsj7D/BJrt4YdS9Z97Tfhu0ds6DxITG2zOSCFFl4Tq5O3Xl6oYqgsCPFtixsgqZu4WzYBHAcVSZIGaxg6jINmYM6nCLzB+j8D8cNKVId1im8yybPW+VDqJ1I00MoTsmt4fF0XaTV1TLMb+Fxcl3eWqMWR3a8k42vn4OTm6V4HqvWMol1Kyb+U4XsHN8cu71xBEAiIOXjhiiFCdGkYw4GhJra1tOY0YZu7HA4bB4hqQQjlGy5Z+EbwTOt5pWkKh5Qj3Ny2EdsXmHzQQtS7RhLrUrN07jk3mJ7LVWsc1Gnl1xNxqhVRRavKNtUR6XH1Lukm/rnKgSZYEA4wQiyLgHU5JgkKDUFwMU1QYRKNjiOABcYrCQQ3BQR6jHMsTEMwZAlBYihz4NIFQiCBzU2CIPpTMNiCxHQNLmCGcW5QZ9oUF7RHuhRItULhFM2H8trK2uoTF/VkUFj9GIZy/0EOYEXSaDsAm8niSH0MD8eMGKJBEUGJZt+LkMpBFSE7DAOvmHSMOLU6h3Zd8fdOV1i7LHLtusWVLmbpnPiiEFSVDshCEzmsKgeaoKC2IRN+TiRoaw2KvV7IMzoeV8waQOosNdC7Ytw80JpOqHmNiKvEiZgyTMOViQdTDcTQxcXcUVHMQT8N/usKccNU1IKpf6DNmeOk06joIfwCDnQyRP3QX6DQDpX6jiZQdDngcGIl+jte0uWbryihIPPjKG5VbqBZL62WelxlD5K4hQJY0UJKCJJKIVESwCoAXzx5n1j3G8AyhPr1j/VKlcKMMh9j/dpdah50Iazhx+pKCji/MaIGF9sANC8MKlzDeIug4wvxBMXlNpjRkFIXwDq8ohHPPpdwyCelCAiALCISAhWGwQMB1D6QxXIiZeJO/SfV55b59HEG3VwHIwzoUSlSi3QgIiUGICYcqHsCFA5sFn7UwiDjSMm29GfGgCzbTQGxgIYgrMFYvG5SMKJY4NwgTNwMzetj1Bu1EjcQwUMiGdKP78jZiXVPMQ7NoSEHaSy59H105rzoIVFYvWc/UUC7NEOYD8wwZjlmGO7Lu0MSh+3p8vZhV6Jlvx84m7M5qUPagsknUNQjA7n7oGfxkhQrs3WtbM+APiypM7I2Gw6Rq3DhYp9Fw5Fv87k7Y1PtikZc0C5G5Ejc2RskG2VOOPib7XeKUgtbNtwKE4Oy6GjbObTlwzwxR2z2SIMZtghQkDdIZRQFIiLADSSoSiwPwITkSgoBYARYLJFYJFDv9lW
ImtUDHUDZCwXaGrh5ARhIjFXDM/rJvYI+HwsLgIMh1xqaDUjlhC7EYFDK3ClL4wsvVhZ32U+kkhBYAbQLuHZT/j4FxHcCB1RWOAXLAfPrOMGEOUd/jjZjaoif3IASbMCLaiCbeIfgD319Yfj/ZK0b1FzWTQHuJAkiVuphRQ1CuaD4osXAslCdEf8iIh8Dzcoccbej5T1nuAo7IhnBC1yWIyylowRitKVhWwQrINAmZYdDG6QA0KMGJWUGAFUQlECllAUS2FBYySMEEVRkWW2tlD8mCUoMYiiYJMZRhiRNEPZr5jVrfjig2ws/XLPtji7ByTh5uQyAUZV43AMsCdAwAhbJBEgTgZMVsKvfXdKsf2fw/s/F+v7Z9t1ohDC3trQCa2aDZyLPCfdDkR8vrpkTzyIaErmGv7n0msA7h8YEJIwCDRcpDvX4hpK/HptPn0H5fqfm8YYiOAjZBbAG9/j8xAdxc74186cgO8OLxyiSVR5qI9k09CUCqYEI3EQRUAgQYQUl7CQCzIIkRjN2jUrEcJKERg/r+KzJO3rL2/KNRirEVkSLgTYmgcL6boY7ETozwfJkPGKDbJ9EPo/f0Wdo3NQ+r5ZszZlf53F2yi1kENjqzE1aBeVnhInWh4Csh6/Vvev8k58LL3UUO29L6z3ER4tY/FfwteB45mQTnCZHnzhY4Dm4FpzrUxPr9R5HpNg+SQtRXtlQy2SdowKKYUlQJ8LpJMIxAQUVE70RA+qcB4BkhshgXcZgUbIyTYw3d0UBZE6c+XkXE+RUPGzJJoCSMZITiX4EL3JZOphmdiIMHldoxCwKopKAsROnnxMTcJvM+qOmaiy7UMh4jMNic87MIFhQj5T2ebMfWIeKFJYCRBJ4r5BRITnDJjc4oIJAFkgiCYpCOl9Bya9vhVWcS0XyDR/Ya0GLCMQSNFJLGEXzwagpIRmmNrLPEbMGQELLOybhqHTaSTW71Mwxx9LoUXLKQEsRZR+luH8LJ9SBa5JROaQSLAMMqKLCjVAtGIDGEFCoUZESIi0CyE7QQAqKyQm+5puT8uYLEYCn4ToBsBsTl1oMm9YyTWFoxGSBE2IXiFa/ceYfbmHNxnXNCl84lu3urFmby9n1NHfN5fxcUPTDriokRQYxiMixGtRfeXgEFS991hEuKfbIyDBVrKD6MwQ1Dt3hnUwfbQ+x/JDkUU00dpdgnyhg/T59AFBPaAfdCtQkijg0bZRB/r+U4ngu4cM/xFcgM0GJEDvOHIw6+3E92BYWRC5xPdgasJonn03jHxNzGnzrazbueB2d8DwF3IoRg8Hg1blfoMFluCmUV4a3bgZSr7r2+bbOJgvTfNTOXdK8NmGKVXUNBOKYnKmC4cHTiSh9uMtbhI9ttXN4RvacmXyhtoHuQdiI1E6EmHEqfC1LppRSGEnud5VPSsV2OxiTtFPYeFBuN6/SNh93wcMBpe5c77b6A0ifgDUQQ8wHwjiAkiD6YB/IiCuENIRB3eIOeZBjYiSBIrv9Z0DdgCJ+me36MCgNPu+Y+ujBH3DSbbIe/IYPCk6yWylNElytaSlstpWWfBZTLFgooW3cgfcMTglyQ/GV0MICgeCBBRQUhPuKc0heHnPe2iJIOZokMJUSJAT5vI8DoHSFogODWAxZImhs7wkJxzfpGPkhnL2Yc4maItCroD5zDRm6WPUcu6imB1eBa00YfbfBNHow34sYzh8T6k/1M9KFlsNOIorL6wsY2woYagsc8s9UNgwAwIqYZFwssMKXDfw6heBq/6J1YjF5MDrlQbR1IQMz+INWeLCJLlBJIpCHK2UtJbZAtp8pczQoz8BaBhE3CqxieQSFCodB8khk7Iu9co02Is9Oi7hO5IRBIrBYpEVVRIMixikkCCBFIkAihEgDE5avpIqfuT/+QaHm++w5i3RIYIB2oB79JAKQO0ENYgdcYsYJIQkX5aQokQgrIpGCxUSPxh7Id0+OSd4o1JpA6x6CUvs8g8iDTu7kKR3YuyUQIS4
QJiBhB4RvexQQLkE3s60aTNSH78TupD1+UTQD707uKy4GNwMx9gmD0CGywy11IsdiAnWh4bTzmzQ7AXQFGVBSZlH+/RpEuHgepFtghCAPo+Xhk56kmEzcQZsjqCRt6rsSDGGEKMG20tjbHomjsD2VSPhDoFSSXwWe9WSGwUBoBaAQyYPpPFEoPDjTIshCHyEKLaV3BmTWdYfE7ritzM6NVBZ1gfsZmixo7ePgfDNYR9EKyPKfz46XJRY7bDkjy6E8Y5zLqzleNCfHZghqJRKJugYiT8sgqDW+RfLMqRwmSKTfac29L0uGork0pnCxBUdqyGDPM5iFW5UbPBjZkbD1+Vzy9dEfO4onXwvmsnQMEvOnZrROwSmhBwjaqlP5Cql4nY/YnZQ/4aAwBlDn1IECBufhnWnkMRfQDhxjreX5g5qxiEWBIEZJAgwYqECKc+Zj/46X+fx/Fj/OBhe1nc6gK6jvEjO4p69vFpvZhUipejwbvv2TQZh6ZNEQS2loqy0q1Jo2uaoRd8qHoot+MbJwm6zC7ZQiiwR0JKuipbWIokqoRSqliRRthWrN0oPAUGTUpuIsS6yRUojQaJGpUSvLjc32oyZGG4AgUkQJvAmwYf2jiBGJe00eXyY3NM6kAeK6kAQRxBBcJoHUkNkQhF1hBrcDvHUYD1YyyCqbz5mxBhvaMcF0idR6ijw+FfKXCBdKLdcoiS/s+TUNZzgsOG4Gq673IqCgs8aee65tYj1ew3jSTCE01ZYSLIcQiXhL0Cchk2VKIKNLO77/IrPAL6WBRMKr1KGNJty730g0SwJt3HZ1Ew3qqENRjYPi/S02DvYzGB5jYnFEOJQHV93X8JtdWrRnLkLI1I3pSQdVTjcKJfDUJGIEAGABFNXRQIsDRlSLcQrFUoRyV2jRm48+hsKvCr0dhFyI7RZ2doB0UklC/Zdgg1b05uxj2bWw13P5VtBeXR8QTeMP8LZ9jRTYCXZDJALLk5EHguubWNTMOIlORCwjtO6wQ7M/Y3bf+IcyNnWkC+TQpeSGb65oEgXaTOxP9qnANkQaXsYgmIZwp1OoKshlS+XyG8sbzsqeil+iYEOpOc8RkWHn9Qa7xF7xLES2WohRGyKxkESiCg0goFVAFIjEkFqNowUrSIELTjzGpT2M173OHMApLQfOViAyCTg6km6HE6HtubUho8PqrxOzCnCslyrXRXYpODx8LZmXiZw6+tM6JZcsKIaDUUF4XJHzHaWeqHhVGBeufY9VkfFso9cXkdhAdxkEcEYNkE/bEVJAWhwMcGwLYXrLtozObTv1+qF/ZN8edJRlarPheWAOyYSQ2kCwAa2npbv2Z938QBAS3BtnDet4ZifDtDVXz1npWSECBAl5J0BAO9DhBlILFgbEZiPQnAYq0Rn5jIae60jc0GvxMheNxUw+0aSu8JAa4XMeYZACRLbMSzfRU2lhpXQxmBIDS0wauYFClIMmUKKKkGG2YYioK6Q4nSeSaIfJZITaKJAm3CSmEio6kKTJE4wgAx3IAKUqYk50tZtA0AoswZDBAMVEYvQsKaQlEGTNO4mxjIstGONEb12ui6p0NLzhmZvoztwW4Po97vYM2ZzTVsCGEFGFNaWFjMYqBFOYB8D42DYc03T0FUCzypcyCDm0thsgjRExiOmC2IN46YCF4opVbyvUXwSQGybBxDOBcbFCUCTAQqFAJQigk2aL34ce8ryYo85uGokUNzxN2yKtpU+5ub6NnxZIKrEU7bFD6l4W2I7VMC3qqbw8phph2iFWeItwTuLhbPgmMiyAYwp0FIlGNI3Ypdk0aJqMBErGahLIiGiGEaFJpMDeBMklNpSFhTZoyUiYgLCCKKgsFBYCyKRQQQkEjBVY4kASMQ1g3CkR1twOJNYRcsCICcys4AJIFoSKAQwcJN7gCOHl1Yz40eN6CMwAHYJySGb6twV/Miu8oiCbC2xQl9tJv57O7A/SQj4ru1EgASamDIhEK4nZ1VI+pO7vo+2Nj3M
svmC6FBJJ5RgiSSLBH4mmeZDsz2aSzHtyFaqywE3Rcaj8FbZ0Svo7ZZPAh7x6fKeyJ6MLPOWVYVBZJuZuw1ppAoyQsP7t5AySYAiEQkkQEQEgCwIjBYCMIsC0RkoaGgrEEEaShSwrQSyZEPTrRheNXGKNoVERiIDFUGQzR09TzD4mjsHUM9FwOxNpDjwBhQfKZp9p1+rMWedhnvq0raj8aAjEd1F3GP4xOGBIEEpiEw21+oyOL7K26y3vIAqGYyN0KuajBKmZ00FL7EB/ORixGR1w0HFNbG7wv6J4k536FFvIzlg+a9vbL2YZ8WxYNG7nGEDYrgUoWA+p64MgLc5/Q8ygCmJISMogUpAuhnpV/CGdDJROBpU4iZKj9v+4V7/xG3tIwQxiom1SDCSEIRgRgjpOft+5yN5imR4kC++VddADY3t7BcKfPIkAk+GfAsK/1xEPQNwhFCJGTedxcSS0LCIVBqrISUIQgIVBznQobgwcAgLiCdUF/wVyc1KVmaQUq1CXA0Fj4QzIBd3EHUJggxiRD4C6C4GwUJmB1NAwAN0gG7cQNJFqJg44J4IFxtaiGpM1FBZgyfaRDGGYFqEOEUNITBkYMxlTSGaUBXNElCPBhQoeO5tCBsaFefYqf2bWqc3WSnQMEokzDuR9gA0Gg2BogaWsykgyAHMBOhBQdpAUCgIihrokTtPc/H+Knwul+IpUqpjIUcYoLYPBOwTQQCIhn4QA3HGIP3RQ8IQipYggVFK3EBiV0Tq96EHMBkTAKLBEJSnPOw1wfZPzCRiF5h6kNp5fnAsQgOejJF5ir0B8Q1aXtPu+FkkVvCETtPvF/yOLGmQWCyQaAHxns9Fyk8+xVPv5CG/wnjAsvsjIyC1xOw5lMaIHy0pQknEyhTw+M2rIzIcGfY/U+Y9u5Spg+uEWRb99TQxth0ES0pGjTOxBFQyJuRCWbBibgKBvCbMmCwvY3kodqZsjREMQaMRmKhZrzMDs7iAdo8j9tPQIFgEgfhCuQ4fDWfOIgMRDoPMAgew0LEYuYXgjx+9nh8V4ZibR74TPmvH8UkKEuXoWIm3I4xMmY0djYEQaBZlYLlueqla8jpZ6MtN0KDQhNs5o8Ae0gEeb1KH6kSm53KHmsiZKyESwG4c+yfg7dpZ5yTCNg/cF7OjjeF7F+RrtaW0DnhBNw3lImQvukrYBJFA2M82JAuEE47RG6uxM2J90JMAN+ZsWoxZJBUBZBYCCSCRCLJEEgKAsRYEARUYsgkRAEgpFiMEYLrXWPhyTvw+c8NOXwT7uYOKNdoz7nqd/IpQokhCjXRbffc5E4BR1DDbuoLq+ezUTSGezWeo+6fLtrfexR+/S8WVDdPysxUJTr4CdEbW8ZTbEoT3mvpRNhbsnHtNSpELC1h20mwghUkay8PuOLEkP8mfhWaiCrcpG6zTwisJ+G3zBeU/IHrHcEgEN3EzFaEa/5HDr6z4iWEsJyBiGYKaRgQiFESwOGSRxr5Gs6EBuC6FgGaloQRIOA0+Mid7hAmQ9vfnzhmFOfAn8V2fWl0npeICRgsinyU909NNPN55xsj5Gx94HHbFVBgQbcP7hULYy+ZLeqVE915NFCPqZenLPeZmizvfUT99BFj5Se4oaJx3WQ7i9MUWHMY93cZefvmprKRScyTD0Hs9YKTUsfmtD67pfpl2CmxXNEGWg+4zGJlpFBBIjDYywDGAwSxuiwBJBJWFEbSLFHEKLU6TAhd/IuBx9XAwxNEJhUIywxhHGNrcoFuz2xR4SFw5GbUw1Mi2kmtIy4aLjpphpDEFEIllBUaUXYNjU1NU3QwNFs0M0K8CmLpIYRmnWPllKOHIj4IFZjcaGmLR9RGEPUj9hBW4ry0u/ur2dlHmx8xjv2Bp2vWFouc1pySn8b6oLccgkOyFQCKoongHN0cu4PLA5TM1IG5JJYbijHI5WOP7I8zVpCqFcNMsKNwTip6yZe+DatDTyogxMQfoeZI9sWmUXznsD2JiTcN7dUMU
zadEmbEQZLMUCZOwXIrUSJFhIfA6it6177Ukh7oSig8qaXwKOmau07Eo92QUhB8apJngaziQnIqgzBcx9HhVkDVG2SdgKBggMhzd0q6s0DlllGiaoTvgSwIxmMXcHIIHx/cmzdL8SWKfDOfnL0NHW5ytNSCwQnr4PZcGh5U6a+VC2hWBohdkYzode4uZtVb4v44U+MNvPvV0hFQRM3DtVOvxpyQ0hRrJ3eNn2Ls4+nVNGmiUtKNi/FmP/rpytFaBuOZdhwZDsl+B7+Dm8IbwM/qaOw8TyCg86gPtCXl2hJ9lTNwO66y2W0CrBGSVGBNYARvSL7EA+QE/tYTzH7MgNABoPlgEGCyIUHfupHWDAu1BEJAJ6hdiWfpM4ZRHRCMRDf70H87Dcd+R2x2yd/MtaaH1IJ+Td6g5G4+Bm4iP0xyQ7DkSyXCRISSCwYZlERXUzCvk6RhVuGEi1SFgkuDQLQo6sAQwBu3oSoNEAJAAYIW0CwGSAiQUiKlSWg0iwkUkkFFigWEsZQogWCAglAsQsgJEBGQGMBJZ6w7wgsUJFkUzmqPxguwIPuCqpgw3D47z5mHKwsDnlVYYMJiCQpD5/fNT7GmiyYjI5+2eyd7c+7+6/35cdx3d+EOk1+Vw+t9b1owZb6tV5HPvh0WyEcuGoMoP5PUwM7Mdr4d5A8eNqwT67dqwxHuPl2KMs0/qbOTLafi8UNd5IY+DvlymC/4CYZgxYF7jhjaWgnu3SKQXZXDffXUKegQ4lTWgrDmW8ndEcSSVQLYccVMyNrHRBIQCwuUx1h2DX24jjI7Onh6/UUdJ7KlbYfhaXNNqCt4kolys0v8h9RIucbe8nbdTMT3tXTpTzJl8Tu4wjN2b8xuLw6ZU4M4mV4tBmfWp3mUe8TZk6U4huSTYsieCknuXeioaCfrIhvXPNksmFrQsQSriKe9J9PaJVOzUWzyRiMPKiU9zicGHIuWh2/R6Zgt3NiwbJSp1qNoaNs4nSD5YxWEbW2Yo1tVXExWTO1gbTpsHgOzmHB5jS1vUqVE4oMHz1Pra6aTWM03LxDbICQeBbbkVIjOYc2VCRPOFCGZAocjNz42dAAxIsOJMiWF4OCBYeKa8awhE3ScSGihjZCpsTMCXoGKjeYt0uzhCQqdDkql3jw1WfANJtQiYYzH/nD3NWUUlq3CiGcdxvYaMmLblaK5Yq1FjjNGXDFFmw42oTGt2dum8kJQinkguPR41RKKW+nlZp4WIWWVVPkWxAogkIl4JXo5z6T6Ijx2NQT4wQYe8xapA8VrTusC3+Rhqhh7Pv4GQacsnaTvn1QHTpSCHTRBcHXsNC6dSCmJLhqmNswhCIRuN+KaKTIWHnmJSzgQsYxjiSskSNpE2hMwBjGKAa0QBSCoLWDUMFwAsVEgMDMepmwgSLr20WbU8Xv35+dLsc+885R87oaBwliBAUiH0dlC32Zgs9TodDLbIPybUR31KG0KjUFgxAZqkULhSsFK6MyEwqOvhjrSYpWPUK3kIpCacpM1mamhtllooWGrp2VlMxBqvKq1RqurLaRwikeSqgXCL3VcsSPB59bsg+nCuwdu6hS/eG+AsAN2BwQMWyIMeAUeWeyrGiu1mXEqU6pD0UFNQaJ3vyXVTSptGK5GLQM4MBR9UiB4KqIrFWPnvTYhff2/0cUw6DnEz7dBEdzqhA64Mm2qodAXyFCMCBYshwRSxQ42Q3leCH598wzHSbNC1XS9oamg9EhF2v3hrzGpwzNSKdIFjj5UdhvrSpRlKhO+sNieabHNAL6OJCcL4K8TfJyp4tnTA3wgakVEYCxYIqRERUR3FY5gYHyCYZCkRlSloFYCQRhCBFgBIgkIcqSiJyRClVBSJRKehutQviE0zvYRKyJaLtiIzcdJKLGOslMFqiIhlzKZQsJha4MhEQmZg5AAUqkNokpNxBAOGdfcZOyNEWpelj5WdqaR2J7/HBseYnODDc4N6FYMvIsxQ3iSkrwzeasUXViPbOgMlmpr5
ucAhrBZfpklPhfvAONWPjVk15zLz5uhwF0dCaB7mffs3DGJGgutrA0uSpgYkOsMg2wgMQLZBzo2+ZrWTQDLBs4XwZhnFX02gWfbkFE6qKCLsTiwE+Z8EjG58Y68pJCUGuqCotKD09Dx07uW0sTJpvNow7iNIPJ0ZjmFLJ3UHHmnpdAtK4f4KzbCrM0/0RLEpmczB2WsWWs/YwWHXAjIj2tLnP18o7hvA5cJo+faocTdZlttQ5T5MvhEVqHFSOphyKwXI4DoekGjMsTVR88uzQMjOgcmqOssTDoG30nnfO/PTi7oZymcYTDdmg0llqsXVnLFBpV8DRuIMLCLBBmUGoB3DiCtPhmibcnZjeKjiINWwy4cKCm/FXTRuhjpjFzU5jxhgMvBDeK4qna0ES0s2bMZfdtQ01nI5eN7QwrIIkIGgkKZJGbzcU2lbQ6xWTWM2ImBAhtd8iRubkQKDQw02blFu1HtsQDZGRkHGcRG8dMl3NFMgbKIE2RUmKKrmcT4RXhdxkQ+ZjeamiiE9KRCjTj8W92+G1ckkW9CFUwElzJMYTjb5B2ZsD2eWKJNkbG2FcjLfBBTo2fuxdZdYQSZy6uGZehtVoaGJjTYxo3KagjMSlCbsGxpgsGJqEC2jGc5TRg1A5lodREgcAwrhRbeCvDhMO7TVMoMEKZzLxRGuZitG0Pitazc5WMbDJb4htBUzoaCI0AsCqNjQmldBU4mHGY0Nm4VEollh5jEzlCUcuSjSHWNyYUhvksy76Y3yvh6OfMOSTBcBEbA0lyjEWDOWd0YxJqZkE1LZQwiCTYgNFiA5E0QOKoIOnZ6C99RiTWhszDGexlm7vPqsYFVjC5DZHAl4WVPYonEbG8A1IH3uynPuKS5AWRXIM62LDhm5YU3TcHY+dC0YIDgaBF0+oyDIekhWWDsKXd0yagiMEiJoMLBgILBZFayysbDAgRBjCSKMxEBMgqlShyDo0PKdvBy4hXVSoq0pRNSmIwRbuqsCqORuaMhLGHeHxGTizh9WQwIYzARaBzYTHJyj6HTKaN7DKJNCG/bEhdnB8HPmYJJ9IEYiJQi0p7752Zq7S0YzPTaIaJHUfSrMZfUU8IMBJ2gbBz59dg0EUtiVccnCSHHcdbgdQJpGI/GKtKlQQaId8UpDMIB64ABVsqpSyGc++rjbDldFxsKlhE2Nwx0oYnG+y5cOt8lycE1aaMVUCDGR4dYTGCYTDfwsxQIQGuA3q3ZB9fN6u72doXHEPR56hfEg3rgSgWo1UFLICllRRmtXCUk0b4FECNwpLF0aZcpvTSvQ0GjznH7d3fDGYqeDKbGspRR+7rzOOy7xd8HJCdkh2gg7RROYRDqvU4etMDOGMek1xB5RU0iJ8SqqvWIcgcAaeeWtIjCnL85wyWbHK/NTz5LbA3yqoqw6iFKIw8RMA1CFJ7loKN5OgZabaJONa2SXloYnrdSjnCAWQNFFgOmRnhm5HeJmF4XLGgikgQCIxSMH1lmFKfaQLsETWhEBtRmmHDTEPX+bq7uTsmQkPdBuh07yTRkCJDm57mBs620bhZSobbBoeTarWJGMulZAgEaGy1AyrQ4Y3JlSPeH07/LjgX1jS9LsSQaQaKmoSgQ9gdusDsDEUq5uBsAhqiuQEGg2gdexLGoymyKQ4/lJCEMSjHvq5LHhvQO1PIQ3CcqXsEOXByncRXKYes3BSJoGGixRDEILSRSEjERkN40QNWseZKTX5ZgjICwBv78xAYEshgBnH5bgakNIrmJxDIHpFCMD4EBYQdiP7MtCJ5gLjnCAfsIH4zteBGSNLUKWkS0GHZGDAm56IHnk8PZYfJ3VGHpaBSZ9der04LD20ZGXsK1CZZ/528yGCudj2UGhEhyYGDD0oDSg+Swtii9kFWh7sfELJQzpCxIhybpw8PDpepO4i8YEhSoa6HACQw7kK+qYhjeZkd4YkSGrTW+A7Rg9A3vf9c80rw2qOEJF/zsuVZmAUWAsJBQSG1hGaQOpiXCA
FiDSJdl5BngcA08Or9BBCEzC3n2UFTz8alg5n1fSFh+YX8oQHod4ZZw2igiRPkhyJ2rb8DclZQFgyhQlCcNBcVx74/Z1FFoaJ1MMvC4WR5SNUwjsRnu+1PmwNIq4bO7taEEMpA82dO8nB3bpcEYRZl8QsGOWNH2F0xYZDSZDQWFSMAsOk+kAyQ9k9ZmMg8DIwF5mxp9xjCEViUCZQwILYnR0UPKb5+vwr6/P4TG8knb+rHHHDGbQdyUSLyTaruKC0DbWGVBhvItUxkn5BM3Qw9nto1es7w5jx/H3wugGdymNZZiB7aMVIDAiAsohCpIUZ3MjFEiwgKBPVFBA6dTuO/MjWoYqGp4UDogPyn4tglvlO3rztttWXR8kxWh86gtflE16kLUbPk+46RDZOTkaGWdCeXGt+/XeEgkQ6Z6myMifAMiYpXz2NqWzWm2d7VcZNzSsJoIMhtiqxtLoEDhDS5jD4KhHtXaYIJt0inVi5oaAyRDQZGnERLBxAxIro8PKjt5nus+uEnBDQXLTvpYQ/8z3xyiT0f1BAmfF/wFNFiZpLzhcA4wRidBG3ie1MOj+/Qd/EofWf1eZvUW2nd4f5E6NOAfvHI+eyrGDhiNNMv6OT8/v+b/1//F3JFOFCQ3lTu5wA==')))
\ No newline at end of file
+
+import hashlib
+import io
+import tokenize
+import numpy as np
+from tabulate import tabulate
+from datetime import datetime
+import pyfiglet
+import unittest
+import inspect
+import os
+import argparse
+import time
+
# Shared command-line interface for the report evaluation scripts.
# RawTextHelpFormatter preserves the epilog's manual line breaks.
parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Example: 
To run all tests in a report: 

> python assignment1_dp.py

To run only question 2 or question 2.1

> python assignment1_dp.py -q 2
> python assignment1_dp.py -q 2.1

Note this scripts does not grade your report. To grade your report, use:

> python report1_grade.py

Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run:

> python -m course_package.report1

see https://docs.python.org/3.9/using/cmdline.html
""", formatter_class=argparse.RawTextHelpFormatter)
# -q accepts either "2" (whole question) or "2.1" (single item of a question).
parser.add_argument('-q', nargs='?', type=str, default=None, help='Only evaluate this question (e.g.: -q 2)')
parser.add_argument('--showexpected',  action="store_true",  help='Show the expected/desired result')
parser.add_argument('--showcomputed',  action="store_true",  help='Show the answer your code computes')
parser.add_argument('--unmute',  action="store_true",  help='Show result of print(...) commands in code')
parser.add_argument('--passall',  action="store_true",  help='Automatically pass all tests. Useful when debugging.')
parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars.')
+
def evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,
                            show_tol_err=False, show_privisional=True, noprogress=None,
                            generate_artifacts=True):
    """Evaluate a student's report from the command line and print a provisional score table.

    Parameters
    ----------
    report : the Report instance to evaluate.
    question, qitem : optionally restrict evaluation to one question / question item;
        overridden by the -q command-line flag when given.
    unmute, passall, noprogress : tri-state (None = take the value from the parsed
        command-line arguments).
    ignore_missing_file : skip the check for the report's pre-computed answer file.
    show_tol_err : forwarded to evaluate_report.
    show_privisional : print the provisional score table (name kept for backward
        compatibility despite the misspelling).
    generate_artifacts : generate the .json artifact files used by the dashboard.

    Returns the per-question results dict from evaluate_report.
    """
    args = parser.parse_args()
    if noprogress is None:
        noprogress = args.noprogress

    # Command-line -q overrides the keyword argument; "2.1" selects item 1 of question 2.
    if question is None and args.q is not None:
        question = args.q
        if "." in question:
            question, qitem = [int(v) for v in question.split(".")]
        else:
            question = int(question)

    # Fail early when the report declares a pre-computed answers file that is missing.
    # BUGFIX: the original checked hasattr(report, "computed_answer_file") (singular)
    # but then read report.computed_answers_file (plural), so the guard could never
    # work: it either skipped the check or raised AttributeError. The spelling is
    # now consistent. The message is also a single formatted string instead of a
    # tuple of Exception args.
    if hasattr(report, "computed_answers_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:
        raise Exception(f"> Error: The pre-computed answer file {os.path.abspath(report.computed_answers_file)} does not exist. Check your package installation")

    if unmute is None:
        unmute = args.unmute
    if passall is None:
        passall = args.passall

    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,
                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed, unmute=unmute,
                                          show_tol_err=show_tol_err,
                                          generate_artifacts=generate_artifacts)

    if question is None and show_privisional:
        print("Provisional evaluation")
        # (A duplicate, discarded tabulate(table_data) call was removed here.)
        print(tabulate(table_data))
        print(" ")

    # Point the student at the companion *_grade.py script if one sits next to the
    # file that called us (frame [1] is the caller's module).
    fr = inspect.getouterframes(inspect.currentframe())[1].filename
    gfile = os.path.basename(fr)[:-3] + "_grade.py"
    if os.path.exists(gfile):
        print("Note your results have not yet been registered. \nTo register your results, please run the file:")
        print(">>>", gfile)
        print("In the same manner as you ran this file.")

    return results
+
+
def upack(q):
    """Split a score dictionary into (weights, possible, obtained) columns.

    Each value of ``q`` is a dict with the keys 'w', 'possible' and
    'obtained'; the three per-question columns are returned as numpy arrays.
    """
    rows = []
    for entry in q.values():
        rows.append((entry['w'], entry['possible'], entry['obtained']))
    matrix = np.asarray(rows)
    return matrix[:, 0], matrix[:, 1], matrix[:, 2]
+
+class SequentialTestLoader(unittest.TestLoader):
+    def getTestCaseNames(self, testCaseClass):
+        test_names = super().getTestCaseNames(testCaseClass)
+        # testcase_methods = list(testCaseClass.__dict__.keys())
+        ls = []
+        for C in testCaseClass.mro():
+            if issubclass(C, unittest.TestCase):
+                ls = list(C.__dict__.keys()) + ls
+        testcase_methods = ls
+        test_names.sort(key=testcase_methods.index)
+        return test_names
+
def evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,
                    show_progress_bar=True,
                    show_tol_err=False,
                    generate_artifacts=True, # Generate the artifact .json files. These are exclusively used by the dashboard.
                    big_header=True):
    """Run every question of *report* through unittest and collect the scores.

    Prints a banner and a per-question score line, runs each question class
    with UTextTestRunner/UTextResult, and aggregates weighted scores.

    :param report: Report instance; report.questions is iterated as (class, weight) pairs.
    :param question: 1-indexed question number to run exclusively; None = all.
    :param qitem: Item selector (unused here; forwarded by callers for CLI symmetry).
    :param passall: Forwarded flag (not read in this function body).
    :param show_help_flag: Append "(use --help for options)" to the title line.
    :param big_header: Render the pyfiglet ASCII banner instead of plain text.
    :return: (results, table_data) where results = {'total': (obtained, possible),
        'details': score-dict} and table_data is a row list for tabulate().
    """
    now = datetime.now()
    if big_header:
        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")
        # Drop blank banner lines so the header stays compact.
        b = "\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )
    else:
        b = "Unitgrade"
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print(b + " v" + __version__ + ", started: " + dt_string+ "\n")
    # print("Started: " + dt_string)
    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.
    s = report.title
    if hasattr(report, "version") and report.version is not None:
        s += f" version {report.version}"
    print(s, "(use --help for options)" if show_help_flag else "")
    # print(f"Loaded answers from: ", report.computed_answers_file, "\n")
    table_data = []
    t_start = time.time()
    score = {}
    loader = SequentialTestLoader()

    # Each question is a TestCase subclass q with an associated weight w.
    for n, (q, w) in enumerate(report.questions):
        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.
        if question is not None and n+1 != question:
            continue
        suite = loader.loadTestsFromTestCase(q)
        qtitle = q.question_title() if hasattr(q, 'question_title') else q.__qualname__
        if not report.abbreviate_questions:
            q_title_print = "Question %i: %s"%(n+1, qtitle)
        else:
            q_title_print = "q%i) %s" % (n + 1, qtitle)

        print(q_title_print, end="")
        q.possible = 0
        q.obtained = 0
        # q_ = {} # Gather score in this class.
        # UTextResult has no per-run constructor hook, so configuration is
        # passed through class attributes before the runner instantiates it.
        UTextResult.q_title_print = q_title_print # Hacky
        UTextResult.show_progress_bar = show_progress_bar # Hacky.
        UTextResult.number = n
        UTextResult.nL = report.nL
        UTextResult.unmute = unmute # Hacky as well.
        # Use the cached setUpClass duration (default 3s) to size the progress bar.
        UTextResult.setUpClass_time = q._cache.get(((q.__name__, 'setUpClass'), 'time'), 3) if hasattr(q, '_cache') and q._cache is not None else 3


        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)
        details = {}
        # Collect a per-test detail record; successes/failures/errors are
        # (test, info-dict) pairs produced by UTextResult.
        for s, msg in res.successes + res.failures + res.errors:
            # from unittest.suite import _ErrorHolder
            # from unittest import _Err
            # if isinstance(s, _ErrorHolder)
            if hasattr(s, '_testMethodName'):
                key = (q.__name__, s._testMethodName)
            else:
                # In case s is an _ErrorHolder (unittest.suite)
                key = (q.__name__, s.id())
            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.

            detail = {}
            if (s,msg) in res.successes:
                detail['status'] = "pass"
            elif (s,msg) in res.failures:
                detail['status'] = 'fail'
            elif (s,msg) in res.errors:
                detail['status'] = 'error'
            else:
                raise Exception("Status not known.")

            nice_title = s.title
            detail = {**detail, **msg, 'nice_title': nice_title}#['message'] = msg
            details[key] = detail

        # q_[s._testMethodName] = ("pass", None)
        # for (s,msg) in res.failures:
        #     q_[s._testMethodName] = ("fail", msg)
        # for (s,msg) in res.errors:
        #     q_[s._testMethodName] = ("error", msg)
        # res.successes[0]._get_outcome()

        possible = res.testsRun
        obtained = len(res.successes)

        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun

        # Scale the pass fraction by the question weight w (integer points).
        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0
        # NOTE(review): 'possible' is stored as w here (the weighted maximum),
        # not as the raw test count above — confirm this is intended.
        score[n] = {'w': w, 'possible': w, 'obtained': obtained, 'items': details, 'title': qtitle, 'name': q.__name__,
                   }
        q.obtained = obtained
        q.possible = possible
        # print(q._cache)
        # print(q._covcache)
        s1 = f" * q{n+1})   Total"
        s2 = f" {q.obtained}/{w}"
        # Dotted fill so the score column lines up at width report.nL.
        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )
        print(" ")
        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])

    ws, possible, obtained = upack(score)
    possible = int( msum(possible) )
    obtained = int( msum(obtained) ) # Cast to python int
    report.possible = possible
    report.obtained = obtained
    now = datetime.now()
    dt_string = now.strftime("%H:%M:%S")

    dt = int(time.time()-t_start)
    minutes = dt//60
    seconds = dt - minutes*60
    # Simple pluralizer: "1 minute" vs "2 minutes".
    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")

    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",
           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)

    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). Total")

    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])
    results = {'total': (obtained, possible), 'details': score}
    return results, table_data
+
+
def python_code_str_id(python_code, strip_comments_and_docstring=True):
    """Return a blake2b hex digest identifying a piece of python source.

    All whitespace is removed before hashing (and, optionally, comments and
    docstrings), so purely cosmetic edits do not change the id.
    """
    source = python_code

    if strip_comments_and_docstring:
        try:
            source = remove_comments_and_docstrings(source)
        except Exception as e:
            # Tokenization can fail on malformed source; report it and fall
            # back to hashing the source as-is.
            print("--"*10)
            print(python_code)
            print(e)

    normalized = "".join(chunk.strip() for chunk in source.split())
    return hashlib.blake2b(normalized.encode()).hexdigest()
+
+
def file_id(file, strip_comments_and_docstring=True):
    """Return the python_code_str_id() of the source code stored in *file*.

    :param file: Path to a python source file.
    :param strip_comments_and_docstring: Ignore comments/docstrings when hashing.
    """
    with open(file, 'r') as f:
        # Bugfix: the flag was previously dropped instead of being forwarded,
        # so file_id(..., strip_comments_and_docstring=False) still stripped.
        return python_code_str_id(f.read(), strip_comments_and_docstring=strip_comments_and_docstring)
+
+
def remove_comments_and_docstrings(source):
    """
    Returns 'source' minus comments and docstrings.

    :param source: Python source code as a string.
    :return: The source with comments and (heuristically detected) docstrings
        removed, with indentation preserved via explicit space padding.
    :raises tokenize.TokenError: If the source cannot be tokenized
        (e.g. unbalanced brackets or incomplete code).
    """
    io_obj = io.StringIO(source)
    out = ""
    # Seed the previous-token state so a module-level docstring on line 1 is
    # treated as following an INDENT-like boundary.
    prev_toktype = tokenize.INDENT
    last_lineno = -1
    last_col = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        ltext = tok[4]
        # The following two conditionals preserve indentation.
        # This is necessary because we're not using tokenize.untokenize()
        # (because it spits out code with copious amounts of oddly-placed
        # whitespace).
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col:
            out += (" " * (start_col - last_col))
        # Remove comments:
        if token_type == tokenize.COMMENT:
            pass
        # This series of conditionals removes docstrings:
        elif token_type == tokenize.STRING:
            if prev_toktype != tokenize.INDENT:
        # This is likely a docstring; double-check we're not inside an operator:
                if prev_toktype != tokenize.NEWLINE:
                    # Note regarding NEWLINE vs NL: The tokenize module
                    # differentiates between newlines that start a new statement
                    # and newlines inside of operators such as parens, brackes,
                    # and curly braces.  Newlines inside of operators are
                    # NEWLINE and newlines that start new code are NL.
                    # Catch whole-module docstrings:
                    if start_col > 0:
                        # Unlabelled indentation means we're inside an operator
                        out += token_string
                    # Note regarding the INDENT token: The tokenize module does
                    # not label indentation inside of an operator (parens,
                    # brackets, and curly braces) as actual indentation.
                    # For example:
                    # def foo():
                    #     "The spaces before this docstring are tokenize.INDENT"
                    #     test = [
                    #         "The spaces before this string do not get a token"
                    #     ]
        else:
            # Every non-comment, non-docstring token is copied through verbatim.
            out += token_string
        prev_toktype = token_type
        last_col = end_col
        last_lineno = end_line
    return out
+
+import textwrap
+import bz2
+import pickle
+import os
+import zipfile
+import io
+
def bzwrite(json_str, token): # to get around obfuscation issues
    """Write *json_str* into a bz2-compressed text file at path *token*."""
    opener = getattr(bz2, 'open')  # indirect attribute lookup survives source obfuscation
    with opener(token, "wt") as handle:
        handle.write(json_str)
+
def gather_imports(imp):
    """Gather the source files of the package (or module directory) holding *imp*.

    :param imp: An imported module object.
    :return: (resources, top_package) where resources contains:
        - 'zipfile': bytes of a zip archive of all .py files under top_package,
        - 'pycode': {relative_path: source} for all non-*_grade.py files,
        - 'blake2b_file_hashes': {relative_path: python_code_str_id(source)},
        - 'top_package': the directory that was walked,
        - 'module_import': True when *imp* is a plain module rather than a package.
    """
    resources = {}
    m = imp
    # Bugfix: m.__file__ was previously read unconditionally (into an unused
    # variable) *before* the hasattr() guard below, which raises
    # AttributeError for modules without __file__ (e.g. namespace packages).
    if hasattr(m, '__file__') and not hasattr(m, '__path__'):
        # Plain module: include every .py file in its directory.
        top_package = os.path.dirname(m.__file__)
        module_import = True
    else:
        # Package: walk from the top-level package directory.
        im = __import__(m.__name__.split('.')[0])
        if isinstance(im, list):
            print("im is a list")
            print(im)
        # the __path__ attribute *may* be a string in some cases. I had to fix this.
        print("path.:",  __import__(m.__name__.split('.')[0]).__path__)
        # top_package = __import__(m.__name__.split('.')[0]).__path__._path[0]
        top_package = __import__(m.__name__.split('.')[0]).__path__[0]
        module_import = False

    found_hashes = {}
    # pycode = {}
    resources['pycode'] = {}
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w') as zip:
        for root, dirs, files in os.walk(top_package):
            for file in files:
                if file.endswith(".py"):
                    fpath = os.path.join(root, file)
                    # Archive paths are relative to the package's parent for
                    # package imports, or to the directory itself for modules.
                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)
                    zip.write(fpath, v)
                    if not fpath.endswith("_grade.py"): # Exclude grade files.
                        with open(fpath, 'r') as f:
                            s = f.read()
                        found_hashes[v] = python_code_str_id(s)
                        resources['pycode'][v] = s

    resources['zipfile'] = zip_buffer.getvalue()
    resources['top_package'] = top_package
    resources['module_import'] = module_import
    resources['blake2b_file_hashes'] = found_hashes
    return resources, top_package
+
+
+import argparse
# Grade-script command-line interface; the epilog text below is shown verbatim
# by --help. NOTE(review): this rebinds the module-level `parser`, shadowing
# the one defined earlier in the file — gather_upload_to_campusnet() parses
# against this binding.
parser = argparse.ArgumentParser(description='Evaluate your report.', epilog="""Use this script to get the score of your report. Example:

> python report1_grade.py

Finally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.
For instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to 'Documents/` and run:

> python -m course_package.report1

see https://docs.python.org/3.9/using/cmdline.html
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--noprogress',  action="store_true",  help='Disable progress bars')
parser.add_argument('--autolab',  action="store_true",  help='Show Autolab results')
+
def gather_report_source_include(report):
    """Collect the sources of all modules/packages that ship with the upload.

    Announces individually-authored files (exam-rule statement), merges them
    into report.pack_imports, then gathers the source of each packed import.

    :param report: Report instance with individual_imports / pack_imports lists.
    :return: {index: resource-dict} as produced by gather_imports(), extended
        with the report-relative location and the module name.
    """
    sources = {}
    if len(report.individual_imports) > 0:
        print("By uploading the .token file, you verify the files:")
        for module in report.individual_imports:
            print(">", module.__file__)
        print("Are created/modified individually by you in agreement with DTUs exam rules")
        report.pack_imports += report.individual_imports

    if len(report.pack_imports) > 0:
        print("Including files in upload...")
        for index, module in enumerate(report.pack_imports):
            resources, _ = gather_imports(module)
            _, report_relative_location, module_import = report._import_base_relative()
            resources['report_relative_location'] = report_relative_location
            resources['report_module_specification'] = module_import
            resources['name'] = module.__name__
            sources[index] = resources
            print(f" * {module.__name__}")
    return sources
+
def gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):
    """Evaluate *report* and write the uploadable .token results file.

    Runs the full evaluation, optionally bundles the student's source files,
    and serializes everything via save_token(). The token filename encodes the
    obtained/possible score.

    :param report: Report instance to evaluate and package.
    :param output_dir: Directory for the .token file; defaults to the CWD.
    :param token_include_plaintext_source: Also embed the plaintext source of
        the included files in the token's readable header.
    """
    # n = report.nL
    args = parser.parse_args()
    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,
                                          show_progress_bar=not args.noprogress,
                                          big_header=not args.autolab,
                                          generate_artifacts=False,
                                          )
    print("")
    sources = {}
    if not args.autolab:
        results['sources'] = sources = gather_report_source_include(report)

    token_plain = """
# This file contains your results. Do not edit its content. Simply upload it as it is. """

    s_include = [token_plain]
    known_hashes = []
    cov_files = []
    use_coverage = True
    if report._config is not None:
        known_hashes = report._config['blake2b_file_hashes']
        for Q, _ in report.questions:
            # NOTE(review): Q appears to be a TestCase *class* elsewhere in
            # this file, so isinstance(Q, UTestCase) looks like it should be
            # issubclass — confirm intent before changing.
            use_coverage = use_coverage and isinstance(Q, UTestCase)
            for key in Q._cache:
                if len(key) >= 2 and key[1] == "coverage":
                    for f in Q._cache[key]:
                        cov_files.append(f)

    # Skip plaintext inclusion for files whose hash matches the distributed
    # version, unless coverage says the file is exercised by the tests.
    for s in sources.values():
        for f_rel, hash in s['blake2b_file_hashes'].items():
            if hash in known_hashes and f_rel not in cov_files and use_coverage:
                print("Skipping", f_rel)
            else:
                if token_include_plaintext_source:
                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)
                    s_include.append("")
                    s_include.append(s['pycode'][f_rel])
                    s_include.append("")

    if output_dir is None:
        output_dir = os.getcwd()

    payload_out_base = report.__class__.__name__ + "_handin"

    obtain, possible = results['total']
    vstring = f"_v{report.version}" if report.version is not None else ""
    # e.g. "Report2_handin_25_of_30.token"
    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)
    token = os.path.normpath(os.path.join(output_dir, token))

    save_token(results, "\n".join(s_include), token)

    if not args.autolab:
        # Round-trip the token to verify it deserializes before telling the
        # student to upload it.
        print("> Testing token file integrity...", sep="")
        load_token(token)
        print("Done!")
        print(" ")
        print("To get credit for your results, please upload the single unmodified file: ")
        print(">", token)
+
+
def save_token(dictionary, plain_text, file_out):
    """Serialize *dictionary* into a .token file at *file_out*.

    File layout: plain-text header, separator line, "<hash> <length>"
    integrity line, separator line, then the base64 payload wrapped to 180
    columns and enclosed in leading/trailing dots (load_token is the inverse).
    """
    if plain_text is None:
        plain_text = ""
    if not plain_text:
        plain_text = "Start token file"
    header = plain_text.strip()
    payload, payload_hash = dict2picklestring(dictionary)
    payload_length = len(payload)  # length of the unwrapped payload, re-checked on load
    wrapped = "\n".join(textwrap.wrap("." + payload + ".", 180))
    sections = [header, token_sep, f"{payload_hash} {payload_length}", token_sep, wrapped]
    with open(file_out, 'w') as f:
        f.write("\n".join(sections))
+
+
+
+
def source_instantiate(name, report1_source, payload):
    """Exec the bundled report source and instantiate the named report class.

    :param name: Name of the report class defined inside *report1_source*.
    :param report1_source: Python source code defining the report class.
    :param payload: Hex-encoded pickle of the report's precomputed payload.
    :return: The instantiated report object (constructed with strict=True).

    SECURITY NOTE(review): exec() of report1_source, eval() of name, and
    pickle.loads() of payload all execute arbitrary code — only call this on
    trusted, locally-generated content, never on untrusted input.
    """
    # print("Executing sources", report1_source)
    # eval("exec") obtains the exec builtin indirectly (obfuscation-safe).
    eval("exec")(report1_source, globals())
    # print("Loaind gpayload..")
    pl = pickle.loads(bytes.fromhex(payload))
    report = eval(name)(payload=pl, strict=True)
    return report
+
+
+
+report1_source = '# from unitgrade import hide\n# from unitgrade import utils\n# import os\n# import lzma\n# import pickle\n\n# DONT\'t import stuff here since install script requires __version__\n\n# def cache_write(object, file_name, verbose=True):\n#     # raise Exception("bad")\n#     # import compress_pickle\n#     dn = os.path.dirname(file_name)\n#     if not os.path.exists(dn):\n#         os.mkdir(dn)\n#     if verbose: print("Writing cache...", file_name)\n#     with lzma.open(file_name, \'wb\', ) as f:\n#         pickle.dump(object, f)\n#     if verbose: print("Done!")\n#\n#\n# def cache_exists(file_name):\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     return os.path.exists(file_name)\n#\n#\n# def cache_read(file_name):\n#     # import compress_pickle # Import here because if you import in top the __version__ tag will fail.\n#     # file_name = cn_(file_name) if cache_prefix else file_name\n#     if os.path.exists(file_name):\n#         try:\n#             with lzma.open(file_name, \'rb\') as f:\n#                 return pickle.load(f)\n#         except Exception as e:\n#             print("Tried to load a bad pickle file at", file_name)\n#             print("If the file appears to be automatically generated, you can try to delete it, otherwise download a new version")\n#             print(e)\n#             # return pickle.load(f)\n#     else:\n#         return None\n\n\n\nimport re\nimport sys\nimport threading\nimport time\nimport lzma\nimport hashlib\nimport pickle\nimport base64\nfrom collections import namedtuple\nfrom io import StringIO\nimport numpy as np\nimport tqdm\nfrom colorama import Fore\nfrom functools import _make_key\n\n_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])\n\n\ndef gprint(s):\n    print(f"{Fore.LIGHTGREEN_EX}{s}")\n\n\nmyround = lambda x: np.round(x)  # required for obfuscation.\nmsum = lambda x: sum(x)\nmfloor = lambda x: np.floor(x)\n\n\n"""\nClean up the various 
output-related helper classes.\n"""\nclass Logger(object):\n    def __init__(self, buffer, write_to_stdout=True):\n        # assert False\n        self.terminal = sys.stdout\n        self.write_to_stdout = write_to_stdout\n        self.log = buffer\n\n    def write(self, message):\n        if self.write_to_stdout:\n            self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        # this flush method is needed for python 3 compatibility.\n        pass\n\n\nclass Capturing(list):\n    def __init__(self, *args, stdout=None, unmute=False, **kwargs):\n        self._stdout = stdout\n        self.unmute = unmute\n        super().__init__(*args, **kwargs)\n\n    def __enter__(self, capture_errors=True):  # don\'t put arguments here.\n        self._stdout = sys.stdout if self._stdout == None else self._stdout\n        self._stringio = StringIO()\n        if self.unmute:\n            sys.stdout = Logger(self._stringio)\n        else:\n            sys.stdout = self._stringio\n\n        if capture_errors:\n            self._sterr = sys.stderr\n            sys.sterr = StringIO()  # memory hole it\n        self.capture_errors = capture_errors\n        return self\n\n    def __exit__(self, *args):\n        self.extend(self._stringio.getvalue().splitlines())\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n\nclass Capturing2(Capturing):\n    def __exit__(self, *args):\n        lines = self._stringio.getvalue().splitlines()\n        txt = "\\n".join(lines)\n        numbers = extract_numbers(rm_progress_bar(txt))\n        self.extend(lines)\n        del self._stringio  # free up some memory\n        sys.stdout = self._stdout\n        if self.capture_errors:\n            sys.sterr = self._sterr\n\n        self.output = txt\n        self.numbers = numbers\n\n\ndef rm_progress_bar(txt):\n    # More robust version. 
Apparently length of bar can depend on various factors, so check for order of symbols.\n    nlines = []\n    for l in txt.splitlines():\n        pct = l.find("%")\n        ql = False\n        if pct > 0:\n            i = l.find("|", pct + 1)\n            if i > 0 and l.find("|", i + 1) > 0:\n                ql = True\n        if not ql:\n            nlines.append(l)\n    return "\\n".join(nlines)\n\n\nclass ActiveProgress():\n    def __init__(self, t, start=True, title="my progress bar", show_progress_bar=True, file=None, mute_stdout=False):\n        if file == None:\n            file = sys.stdout\n        self.file = file\n        self.mute_stdout = mute_stdout\n        self._running = False\n        self.title = title\n        self.dt = 0.025\n        self.n = max(1, int(np.round(t / self.dt)))\n        self.show_progress_bar = show_progress_bar\n        self.pbar = None\n\n        if start:\n            self.start()\n\n    def start(self):\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            self._stdout = sys.stdout\n            sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        self._running = True\n        if self.show_progress_bar:\n            self.thread = threading.Thread(target=self.run)\n            self.thread.start()\n        self.time_started = time.time()\n\n    def terminate(self):\n        if not self._running:\n            print("Stopping a progress bar which is not running (class unitgrade.utils.ActiveProgress")\n            pass\n            # raise Exception("Stopping a stopped progress bar. 
")\n        self._running = False\n        if self.show_progress_bar:\n            self.thread.join()\n        if self.pbar is not None:\n            self.pbar.update(1)\n            self.pbar.close()\n            self.pbar = None\n\n        self.file.flush()\n\n        if self.mute_stdout:\n            import io\n            # from unitgrade.utils import Logger\n            sys.stdout = self._stdout #= sys.stdout\n\n            # sys.stdout = Logger(io.StringIO(), write_to_stdout=False)\n\n        return time.time() - self.time_started\n\n    def run(self):\n        self.pbar = tqdm.tqdm(total=self.n, file=self.file, position=0, leave=False, desc=self.title, ncols=100,\n                              bar_format=\'{l_bar}{bar}| [{elapsed}<{remaining}]\')\n        t_ = time.time()\n        for _ in range(self.n - 1):  # Don\'t terminate completely; leave bar at 99% done until terminate.\n            if not self._running:\n                self.pbar.close()\n                self.pbar = None\n                break\n            tc = time.time()\n            tic = max(0, self.dt - (tc - t_))\n            if tic > 0:\n                time.sleep(tic)\n            t_ = time.time()\n            self.pbar.update(1)\n\n\ndef dprint(first, last, nL, extra = "", file=None, dotsym=\'.\', color=\'white\'):\n    if file == None:\n        file = sys.stdout\n    dot_parts = (dotsym * max(0, nL - len(last) - len(first)))\n    print(first + dot_parts, end="", file=file)\n    last += extra\n    print(last, file=file)\n\n\ndef hide(func):\n    return func\n\n\ndef makeRegisteringDecorator(foreignDecorator):\n    """\n        Returns a copy of foreignDecorator, which is identical in every\n        way(*), except also appends a .decorator property to the callable it\n        spits out.\n    """\n\n    def newDecorator(func):\n        # Call to newDecorator(method)\n        # Exactly like old decorator, but output keeps track of what decorated it\n        R = foreignDecorator(func)  # apply 
foreignDecorator, like call to foreignDecorator(method) would have done\n        R.decorator = newDecorator  # keep track of decorator\n        # R.original = func         # might as well keep track of everything!\n        return R\n\n    newDecorator.__name__ = foreignDecorator.__name__\n    newDecorator.__doc__ = foreignDecorator.__doc__\n    return newDecorator\n\n\nhide = makeRegisteringDecorator(hide)\n\n\ndef extract_numbers(txt):\n    numeric_const_pattern = r\'[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\'\n    rx = re.compile(numeric_const_pattern, re.VERBOSE)\n    all = rx.findall(txt)\n    all = [float(a) if (\'.\' in a or "e" in a) else int(a) for a in all]\n    if len(all) > 500:\n        print(txt)\n        raise Exception("unitgrade_v1.unitgrade_v1.py: Warning, too many numbers!", len(all))\n    return all\n\n\ndef cache(foo, typed=False):\n    """ Magic cache wrapper\n    https://github.com/python/cpython/blob/main/Lib/functools.py\n    """\n    maxsize = None\n    def wrapper(self, *args, **kwargs):\n        key = (self.cache_id(), ("@cache", foo.__name__, _make_key(args, kwargs, typed)))\n        # print(self._cache.keys())\n        # for k in self._cache:\n        #     print(k)\n        if not self._cache_contains(key):\n            value = foo(self, *args, **kwargs)\n            self._cache_put(key, value)\n        else:\n            value = self._cache_get(key)\n            # This appears to be required since there are two caches. 
Otherwise, when deploy method is run twice,\n            # the cache will not be set correctly.\n            self._cache_put(key, value)\n        return value\n\n    return wrapper\n\n\ndef methodsWithDecorator(cls, decorator):\n    """\n        Returns all methods in CLS with DECORATOR as the\n        outermost decorator.\n\n        DECORATOR must be a "registering decorator"; one\n        can make any decorator "registering" via the\n        makeRegisteringDecorator function.\n\n        import inspect\n        ls = list(methodsWithDecorator(GeneratorQuestion, deco))\n        for f in ls:\n            print(inspect.getsourcelines(f) ) # How to get all hidden questions.\n    """\n    for maybeDecorated in cls.__dict__.values():\n        if hasattr(maybeDecorated, \'decorator\'):\n            if maybeDecorated.decorator == decorator:\n                print(maybeDecorated)\n                yield maybeDecorated\n\n\n""" Methods responsible for turning a dictionary into a string that can be pickled or put into a json file. """\ndef dict2picklestring(dd):\n    """\n    Turns a dictionary into a string with some compression.\n\n    :param dd:\n    :return:\n    """\n    b = lzma.compress(pickle.dumps(dd))\n    b_hash = hashlib.blake2b(b).hexdigest()\n    return base64.b64encode(b).decode("utf-8"), b_hash\n\ndef picklestring2dict(picklestr):\n    """ Reverse of the above method: Turns the string back into a dictionary. """\n    b = base64.b64decode(picklestr)\n    hash = hashlib.blake2b(b).hexdigest()\n    dictionary = pickle.loads(lzma.decompress(b))\n    return dictionary, hash\n\ntoken_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70\ndef load_token(file_in):\n    """ We put this one here to allow loading of token files for the dashboard. 
"""\n    with open(file_in, \'r\') as f:\n        s = f.read()\n    splt = s.split(token_sep)\n    data = splt[-1]\n    info = splt[-2]\n    head = token_sep.join(splt[:-2])\n    plain_text=head.strip()\n    hash, l1 = info.split(" ")\n    data = "".join( data.strip()[1:-1].splitlines() )\n    l1 = int(l1)\n    dictionary, b_hash = picklestring2dict(data)\n    assert len(data) == l1\n    assert b_hash == hash.strip()\n    return dictionary, plain_text\n\nimport io\nimport sys\nimport time\nimport unittest\nfrom unittest.runner import _WritelnDecorator\nimport numpy as np\n\n\nclass UTextResult(unittest.TextTestResult):\n    nL = 80\n    number = -1  # HAcky way to set question number.\n    show_progress_bar = True\n    unmute = False # Whether to redirect stdout.\n    cc = None\n    setUpClass_time = 3 # Estimated time to run setUpClass in TestCase. Must be set externally. See key (("ClassName", "setUpClass"), "time") in _cache.\n\n    def __init__(self, stream, descriptions, verbosity):\n        super().__init__(stream, descriptions, verbosity)\n        self.successes = []\n\n    def printErrors(self) -> None:\n        # TODO: Fix here. 
probably also needs to flush stdout.\n        self.printErrorList(\'ERROR\', [(test, res[\'stderr\']) for test, res in self.errors])\n        self.printErrorList(\'FAIL\',  [(test, res[\'stderr\']) for test, res in self.failures])\n\n    def addError(self, test, err):\n        super(unittest.TextTestResult, self).addError(test, err)\n        err = self.errors[-1][1]\n        if hasattr(sys.stdout, \'log\'):\n            stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        else:\n            stdout = ""\n        self.errors[-1] = (self.errors[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n\n        if not hasattr(self, \'item_title_print\'):\n            # In case setUpClass() fails with an error the short description may not be set. This will fix that problem.\n            self.item_title_print = test.shortDescription()\n            if self.item_title_print is None:  # In case the short description is not set either...\n                self.item_title_print = test.id()\n\n\n        self.cc_terminate(success=False)\n\n    def addFailure(self, test, err):\n        super(unittest.TextTestResult, self).addFailure(test, err)\n        err = self.failures[-1][1]\n        stdout = sys.stdout.log.readlines()  # Only works because we set sys.stdout to a unitgrade.Logger\n        self.failures[-1] = (self.failures[-1][0], {\'return\': None,\n                                \'stderr\': err,\n                                \'stdout\': stdout\n                                })\n        self.cc_terminate(success=False)\n\n\n    def addSuccess(self, test: unittest.case.TestCase) -> None:\n        msg = None\n        stdout = sys.stdout.log.readlines() # Only works because we set sys.stdout to a unitgrade.Logger\n\n        if hasattr(test, \'_get_outcome\'):\n            o = test._get_outcome()\n            if 
isinstance(o, dict):\n                key = (test.cache_id(), "return")\n                if key in o:\n                    msg = test._get_outcome()[key]\n\n        # print(sys.stdout.readlines())\n        self.successes.append((test, None))  # (test, message) (to be consistent with failures and errors).\n        self.successes[-1] = (self.successes[-1][0], {\'return\': msg,\n                                 \'stdout\': stdout,\n                                 \'stderr\': None})\n\n        self.cc_terminate()\n\n    def cc_terminate(self, success=True):\n        if self.show_progress_bar or True:\n            tsecs = np.round(self.cc.terminate(), 2)\n            self.cc.file.flush()\n            ss = self.item_title_print\n\n            state = "PASS" if success else "FAILED"\n\n            dot_parts = (\'.\' * max(0, self.nL - len(state) - len(ss)))\n            if self.show_progress_bar or True:\n                print(self.item_title_print + dot_parts, end="", file=self.cc.file)\n            else:\n                print(dot_parts, end="", file=self.cc.file)\n\n            if tsecs >= 0.5:\n                state += " (" + str(tsecs) + " seconds)"\n            print(state, file=self.cc.file)\n\n    def startTest(self, test):\n        name = test.__class__.__name__\n        if self.testsRun == 0 and hasattr(test.__class__, \'_cache2\'): # Disable this if the class is pure unittest.TestCase\n            # This is the first time we are running a test. i.e. 
we can time the time taken to call setupClass.\n            if test.__class__._cache2 is None:\n                test.__class__._cache2 = {}\n            test.__class__._cache2[((name, \'setUpClass\'), \'time\')] = time.time() - self.t_start\n\n        self.testsRun += 1\n        item_title = test.shortDescription()  # Better for printing (get from cache).\n\n        if item_title == None:\n            # For unittest framework where getDescription may return None.\n            item_title = self.getDescription(test)\n        self.item_title_print = " * q%i.%i) %s" % (UTextResult.number + 1, self.testsRun, item_title)\n        # if self.show_progress_bar or True:\n        estimated_time = test.__class__._cache.get(((name, test._testMethodName), \'time\'), 100) if hasattr(test.__class__, \'_cache\') else 4\n        self.cc = ActiveProgress(t=estimated_time, title=self.item_title_print, show_progress_bar=self.show_progress_bar)\n        # else:\n        #     print(self.item_title_print + (\'.\' * max(0, self.nL - 4 - len(self.item_title_print))), end="")\n        self._test = test\n        # if not self.unmute:\n        self._stdout = sys.stdout # Redundant. remove later.\n        sys.stdout = Logger(io.StringIO(), write_to_stdout=self.unmute)\n\n    def stopTest(self, test):\n        # if not self.unmute:\n        buff = sys.stdout.log\n        sys.stdout = self._stdout # redundant.\n        buff.close()\n        super().stopTest(test)\n\n    def _setupStdout(self):\n        if self._previousTestClass == None:\n            self.t_start = time.time()\n            if hasattr(self.__class__, \'q_title_print\'):\n                q_title_print = self.__class__.q_title_print\n            else:\n                q_title_print = "<unnamed test. 
See unitgrade.framework.py>"\n\n            cc = ActiveProgress(t=self.setUpClass_time, title=q_title_print, show_progress_bar=self.show_progress_bar, mute_stdout=not self.unmute)\n            self.cc = cc\n\n\n    def _restoreStdout(self):  # Used when setting up the test.\n        if self._previousTestClass is None:\n            q_time = self.cc.terminate()\n            q_time = np.round(q_time, 2)\n            sys.stdout.flush()\n            if self.show_progress_bar:\n                print(self.cc.title, end="")\n            print(" " * max(0, self.nL - len(self.cc.title)) + (" (" + str(q_time) + " seconds)" if q_time >= 0.5 else ""))\n\n\nclass UTextTestRunner(unittest.TextTestRunner):\n    def __init__(self, *args, **kwargs):\n        stream = io.StringIO()\n        super().__init__(*args, stream=stream, **kwargs)\n\n    def _makeResult(self):\n        # stream = self.stream # not you!\n        stream = sys.stdout\n        stream = _WritelnDecorator(stream)\n        return self.resultclass(stream, self.descriptions, self.verbosity)\n\nimport importnb\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport inspect\nimport colorama\nimport unittest\nimport time\nimport textwrap\nimport urllib.parse\nimport requests\nimport ast\nimport numpy\n\ncolorama.init(autoreset=True)  # auto resets your settings after every output\nnumpy.seterr(all=\'raise\')\n\ndef setup_dir_by_class(C, base_dir):\n    name = C.__class__.__name__\n    return base_dir, name\n\n\n\n\nclass classmethod_dashboard(classmethod):\n    def __init__(self, f):\n        # t = UTestCase()\n        # t._artifact_file_for_setUpClass()\n        def dashboard_wrap(cls: UTestCase):\n            if not cls._generate_artifacts:\n                f(cls)\n                return\n\n            from pupdb.core import PupDB\n            print("Opening as DB:", cls._artifact_file_for_setUpClass())\n            db = PupDB(cls._artifact_file_for_setUpClass())\n\n            db.set(\'run_id\', 
np.random.randint(1000 * 1000))\n            db.set("state", "running")\n            db.set(\'coverage_files_changed\', None)\n\n            state_ = \'fail\'\n            try:\n                _stdout = sys.stdout\n                _stderr = sys.stderr\n                std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n                # Run this unittest and record all of the output.\n                # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n                # sys.stdout = stdout_capture\n                sys.stderr = std_capture.dummy_stderr\n                sys.stdout = std_capture.dummy_stdout\n\n                f(cls)\n                state_ = \'pass\'\n                # result_ = TestCase.run(self, result)\n\n                from werkzeug.debug.tbtools import DebugTraceback, _process_traceback\n                # print(result_._excinfo[0])\n                actual_errors = []\n                # for test, err in self._error_fed_during_run:\n                #     break\n                #     if err is None:\n                #         continue\n                #     else:\n                #         import traceback\n                #         # traceback.print_tb(err[2])\n                #         actual_errors.append(err)\n\n                if len(actual_errors) > 0:\n                    ex, exi, tb = actual_errors[0]\n                    exi.__traceback__ = tb\n                    dbt = DebugTraceback(exi)\n                    sys.stderr.write(dbt.render_traceback_text())\n                    html = dbt.render_traceback_html(include_title="hello world")\n                    db.set(\'wz_stacktrace\', html)\n                    state_ = \'fail\'\n                    # db.set(\'state\', \'fail\')\n                else:\n                    # state\n                    state_ = \'pass\'\n                    # db.set(\'state\', \'pass\')\n            
except Exception as e:\n                print("-----------------.///////////////////////////////////////////////////////////////")\n                # db.set("state", "fail")\n                state_ = \'fail\'\n                db.set(\'state\', state_)\n                # ex, exi, tb = actual_errors[0]\n                exi = e\n                # exi.__traceback__ = e.__traceback__\n\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n\n                # import traceback\n                # traceback.print_exc()\n                raise e\n            finally:\n                # state_ = \'fail\'\n                db.set(\'state\', state_)\n                sys.stdout = _stdout\n                sys.stderr = _stderr\n                std_capture.close()\n        super().__init__(dashboard_wrap)\n\nclass Report:\n    title = "report title"\n    abbreviate_questions = False # Should the test items start with \'Question ...\' or just be q1).\n    version = None # A version number of the report (1.0). Used to compare version numbers with online resources.\n    url = None  # Remote location of this problem.\n\n    questions = []\n    pack_imports = []\n    individual_imports = []\n\n    _remote_check_cooldown_seconds = 1  # Seconds between remote check of report.\n    nL = 120  # Maximum line width\n    _config = None  # Private variable. Used when collecting results from student computers. Should only be read/written by teacher and never used for regular evaluation.\n    _setup_mode = False # True if test is being run in setup-mode, i.e. 
will not fail because of bad configurations, etc.\n\n    @classmethod\n    def reset(cls):\n        for (q, _) in cls.questions:\n            if hasattr(q, \'reset\'):\n                q.reset()\n\n    @classmethod\n    def mfile(clc):\n        return inspect.getfile(clc)\n\n    def _file(self):\n        return inspect.getfile(type(self))\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self._file()), "unitgrade_data/main_config_"+ os.path.basename(self._file()[:-3]) + ".json")\n\n\n    def _is_run_in_grade_mode(self):\n        """ True if this report is being run as part of a grade run. """\n        return self._file().endswith("_grade.py") # Not sure I love this convention.\n\n    def _import_base_relative(self):\n        if hasattr(self.pack_imports[0], \'__path__\'):\n            root_dir = self.pack_imports[0].__path__[0]\n        else:\n            root_dir = self.pack_imports[0].__file__\n\n        root_dir = os.path.dirname(root_dir)\n        relative_path = os.path.relpath(self._file(), root_dir)\n        modules = os.path.normpath(relative_path[:-3]).split(os.sep)\n        relative_path = relative_path.replace("\\\\", "/")\n\n        return root_dir, relative_path, modules\n\n    def __init__(self, strict=False, payload=None):\n        working_directory = os.path.abspath(os.path.dirname(self._file()))\n        self.wdir, self.name = setup_dir_by_class(self, working_directory)\n        # self.computed_answers_file = os.path.join(self.wdir, self.name + "_resources_do_not_hand_in.dat")\n        for (q, _) in self.questions:\n            q.nL = self.nL  # Set maximum line length.\n\n        if payload is not None:\n            self.set_payload(payload, strict=strict)\n\n    def main(self, verbosity=1):\n        # Run all tests using standard unittest 
(nothing fancy).\n        loader = unittest.TestLoader()\n        for q, _ in self.questions:\n            start = time.time()  #\n            suite = loader.loadTestsFromTestCase(q)\n            unittest.TextTestRunner(verbosity=verbosity).run(suite)\n            total = time.time() - start\n            q.time = total\n\n    def _setup_answers(self, with_coverage=False, verbose=True):\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = True\n                q._report = self\n        for q, _ in self.questions:\n            q._setup_answers_mode = True\n            # q._generate_artifacts = False # Disable artifact generation when the report is being set up.\n\n        evaluate_report_student(self, unmute=verbose, noprogress=not verbose, generate_artifacts=False) # Disable artifact generation.\n\n        # self.main()  # Run all tests in class just to get that out of the way...\n        report_cache = {}\n        for q, _ in self.questions:\n            # print(self.questions)\n            if hasattr(q, \'_save_cache\'):\n                q()._save_cache()\n                # print("q is", q())\n                report_cache[q.__qualname__] = q._cache2\n            else:\n                report_cache[q.__qualname__] = {\'no cache see _setup_answers in framework.py\': True}\n        if with_coverage:\n            for q, _ in self.questions:\n                q._with_coverage = False\n\n        # report_cache is saved on a per-question basis.\n        # it could also contain additional information such as runtime metadata etc. 
This may not be appropriate to store with the invidivual questions(?).\n        # In this case, the function should be re-defined.\n        # for q, _ in self.questions:\n        #     q._setup_answers_mode = False\n        #     q._generate_artifacts = True # Disable artifact generation when the report is being set up.\n        return report_cache\n\n    def set_payload(self, payloads, strict=False):\n        for q, _ in self.questions:\n            q._cache = payloads[q.__qualname__]\n        self._config = payloads[\'config\']\n\n    def _check_remote_versions(self):\n        if self.url is None:\n            return\n        url = self.url\n        if not url.endswith("/"):\n            url += "/"\n        snapshot_file = os.path.dirname(self._file()) + "/unitgrade_data/.snapshot"\n        # print("Sanity checking time using snapshot", snapshot_file)\n        # print("and using self-identified file", self._file())\n\n        if os.path.isfile(snapshot_file):\n            with open(snapshot_file, \'r\') as f:\n                t = f.read()\n                if (time.time() - float(t)) < self._remote_check_cooldown_seconds:\n                    return\n        # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n\n        if self.url.startswith("https://gitlab"):\n            # Try to turn url into a \'raw\' format.\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            # url = self.url\n            url = url.replace("-/tree", "-/raw")\n            # print(url)\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/tree/master/examples/autolab_example_py_upload/instructor/cs102_autolab"\n            # "https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/report2_test.py?inline=false"\n            # 
"https://gitlab.compute.dtu.dk/tuhe/unitgrade_private/-/raw/master/examples/autolab_example_py_upload/instructor/cs102_autolab/report2_test.py?inline=false"\n            raw_url = urllib.parse.urljoin(url, os.path.basename(self._file()) + "?inline=false")\n            # print("Is this file run in local mode?", self._is_run_in_grade_mode())\n            if self._is_run_in_grade_mode():\n                remote_source = requests.get(raw_url).text\n                with open(self._file(), \'r\') as f:\n                    local_source = f.read()\n                if local_source != remote_source:\n                    print("\\nThe local version of this report is not identical to the remote version which can be found at")\n                    print(self.url)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of grade script does not match the remote version. 
Please update using git pull")\n            else:\n                text = requests.get(raw_url).text\n                node = ast.parse(text)\n                classes = [n for n in node.body if isinstance(n, ast.ClassDef) if n.name == self.__class__.__name__][0]\n                for b in classes.body:\n                    # print(b.)\n                    if b.targets[0].id == "version":\n                        # print(b)\n                        # print(b.value)\n                        version_remote = b.value.value\n                        break\n                if version_remote != self.version:\n                    print("\\nThe version of this report", self.version, "does not match the version of the report on git", version_remote)\n                    print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                    print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                    print("This can be done by running the command")\n                    print("> git pull")\n                    print("You can find the most recent code here:")\n                    print(self.url)\n                    raise Exception(f"Version of test on remote is {version_remote}, which is different than this version of the test {self.version}. 
Please update your test to the most recent version.")\n\n                for (q,_) in self.questions:\n                    qq = q(skip_remote_check=True)\n                    cfile = q._cache_file()\n\n                    relpath = os.path.relpath(cfile, os.path.dirname(self._file()))\n                    relpath = relpath.replace("\\\\", "/")\n                    raw_url = urllib.parse.urljoin(url, relpath + "?inline=false")\n                    # requests.get(raw_url)\n\n                    with open(cfile, \'rb\') as f:\n                        b1 = f.read()\n\n                    b2 = requests.get(raw_url).content\n                    if b1 != b2:\n                        print("\\nQuestion ", qq.title, "relies on the data file", cfile)\n                        print("However, it appears that this file is missing or in a different version than the most recent found here:")\n                        print(self.url)\n                        print("The most likely reason for this is that the remote version was updated by the teacher due to some issue.")\n                        print("You should check if there was an announcement and update the test to the most recent version; most likely")\n                        print("This can be done by simply running the command")\n                        print("> git pull")\n                        print("to avoid running bad tests against good code, the program will now stop. Please update and good luck!")\n                        raise Exception("The data file for the question", qq.title, "did not match remote source found on git. The test will therefore automatically fail. 
Please update your test/data files.")\n\n                t = time.time()\n                if os.path.isdir(os.path.dirname(self._file()) + "/unitgrade_data"):\n                    with open(snapshot_file, \'w\') as f:\n                        f.write(f"{t}")\n\ndef get_hints(ss):\n    """ Extract all blocks of the forms:\n\n    Hints:\n    bla-bla.\n\n    and returns the content unaltered.\n    """\n    if ss == None:\n        return None\n    try:\n        ss = textwrap.dedent(ss)\n        ss = ss.replace(\'\'\'"""\'\'\', "").strip()\n        hints = ["hints:", "hint:"]\n        indexes = [ss.lower().find(h) for h in hints]\n        j = np.argmax(indexes)\n        if indexes[j] == -1:\n            return None\n        h = hints[j]\n        ss = ss[ss.lower().find(h) + len(h) + 1:]\n        ss = "\\n".join([l for l in ss.split("\\n") if not l.strip().startswith(":")])\n        ss = textwrap.dedent(ss).strip()\n        # if ss.startswith(\'*\'):\n        #     ss = ss[1:].strip()\n        return ss\n    except Exception as e:\n        print("bad hints", ss, e)\n\n\nclass UTestCase(unittest.TestCase):\n    # a = 234\n    _outcome = None  # A dictionary which stores the user-computed outcomes of all the tests. This differs from the cache.\n    _cache = None  # Read-only cache. Ensures method always produce same result.\n    _cache2 = None  # User-written cache.\n    _with_coverage = False\n    _covcache = None # Coverage cache. Written to if _with_coverage is true.\n    _report = None  # The report used. This is very, very hacky and should always be None. Don\'t rely on it!\n    _run_in_report_mode = True\n\n    _generate_artifacts = True # Whether the file will generate the artifact .json files. This is used in the _grade-script mode.\n    # If true, the tests will not fail when cache is used. 
This is necesary since otherwise the cache will not be updated\n    # during setup, and the deploy script must be run many times.\n    _setup_answers_mode = False\n\n\n\n    def capture(self):\n        if hasattr(self, \'_stdout\') and self._stdout is not None:\n            file = self._stdout\n        else:\n            file = sys.stdout\n        return Capturing2(stdout=file)\n\n    @classmethod\n    def question_title(cls):\n        """ Return the question title """\n        if cls.__doc__ is not None:\n            title = cls.__doc__.strip().splitlines()[0].strip()\n            if not (title.startswith("Hints:") or title.startswith("Hint:") ):\n                return title\n        return cls.__qualname__\n\n    def run(self, result):\n        # print("Run called in test framework...", self._generate_artifacts)\n        if not self._generate_artifacts:\n            return super().run(result)\n        from unittest.case import TestCase\n        from pupdb.core import PupDB\n\n        db = PupDB(self._artifact_file())\n        db.set(\'run_id\', np.random.randint(1000*1000))\n        db.set("state", "running")\n        db.set(\'coverage_files_changed\', None)\n\n        # print("Re-running test")\n        _stdout = sys.stdout\n        _stderr = sys.stderr\n\n        std_capture = StdCapturing(stdout=sys.stdout, stderr=sys.stderr, db=db, mute=False)\n\n        # stderr_capture = StdCapturing(sys.stderr, db=db)\n        # std_err_capture = StdCapturing(sys.stderr, "stderr", db=db)\n\n        try:\n            # Run this unittest and record all of the output.\n            # This is probably where we should hijack the stdout output and save it -- after all, this is where the test is actually run.\n            # sys.stdout = stdout_capture\n            sys.stderr = std_capture.dummy_stderr\n            sys.stdout = std_capture.dummy_stdout\n\n            result_ = TestCase.run(self, result)\n\n            from werkzeug.debug.tbtools import DebugTraceback, 
_process_traceback\n            # print(result_._excinfo[0])\n            actual_errors = []\n            for test, err in self._error_fed_during_run:\n                if err is None:\n                    continue\n                else:\n                    import traceback\n                    # traceback.print_tb(err[2])\n                    actual_errors.append(err)\n\n            if len(actual_errors) > 0:\n                ex, exi, tb = actual_errors[0]\n                exi.__traceback__ = tb\n                dbt = DebugTraceback(exi)\n                sys.stderr.write(dbt.render_traceback_text())\n                html = dbt.render_traceback_html(include_title="hello world")\n                db.set(\'wz_stacktrace\', html)\n                db.set(\'state\', \'fail\')\n            else:\n                db.set(\'state\', \'pass\')\n        except Exception as e:\n            print("-----------------.///////////////////////////////////////////////////////////////")\n            # print(e)\n            import traceback\n            traceback.print_exc()\n            raise e\n        finally:\n            sys.stdout = _stdout\n            sys.stderr = _stderr\n            std_capture.close()\n        return result_\n\n    def _callSetUp(self):\n        if self._with_coverage:\n            if self._covcache is None:\n                self._covcache = {}\n            import coverage\n            self.cov = coverage.Coverage(data_file=None)\n            self.cov.start()\n        self.setUp()\n\n    def _callTearDown(self):\n        self.tearDown()\n        # print("Teardown.")\n        if self._with_coverage:\n            # print("with cov")\n            from pathlib import Path\n            from snipper import snipper_main\n            try:\n                self.cov.stop()\n            except Exception as e:\n                print("Something went wrong while tearing down coverage test")\n                print(e)\n            data = self.cov.get_data()\n            
base, _, _ = self._report._import_base_relative()\n            for file in data.measured_files():\n                file = os.path.normpath(file)\n                root = Path(base)\n                child = Path(file)\n                if root in child.parents:\n                    # print("Reading file", child)\n                    with open(child, \'r\') as f:\n                        s = f.read()\n                    lines = s.splitlines()\n                    garb = \'GARBAGE\'\n                    lines2 = snipper_main.censor_code(lines, keep=True)\n                    # print("\\n".join(lines2))\n                    if len(lines) != len(lines2):\n                        for k in range(len(lines)):\n                            print(k, ">", lines[k], "::::::::", lines2[k])\n                        print("Snipper failure; line lenghts do not agree. Exiting..")\n                        print(child, "len(lines) == len(lines2)", len(lines), len(lines2))\n                        import sys\n                        sys.exit()\n\n                    assert len(lines) == len(lines2)\n                    for ll in data.contexts_by_lineno(file):\n                        l = ll-1\n                        if l < len(lines2) and lines2[l].strip() == garb:\n                            # print("Got a hit at l", l)\n                            rel = os.path.relpath(child, root)\n                            cc = self._covcache\n                            j = 0\n                            for j in range(l, -1, -1):\n                                if "def" in lines2[j] or "class" in lines2[j]:\n                                    break\n                            from snipper.legacy import gcoms\n\n                            fun = lines2[j]\n                            comments, _ = gcoms("\\n".join(lines2[j:l]))\n                            if rel not in cc:\n                                cc[rel] = {}\n                            cc[rel][fun] = (l, "\\n".join(comments))\n   
                         # print("found", rel, fun)\n                            self._cache_put((self.cache_id(), \'coverage\'), self._covcache)\n\n    def shortDescriptionStandard(self):\n        sd = super().shortDescription()\n        if sd is None or sd.strip().startswith("Hints:") or sd.strip().startswith("Hint:"):\n            sd = self._testMethodName\n        return sd\n\n    def shortDescription(self):\n        sd = self.shortDescriptionStandard()\n        title = self._cache_get((self.cache_id(), \'title\'), sd)\n        return title if title is not None else sd\n\n    @property\n    def title(self):\n        return self.shortDescription()\n\n    @title.setter\n    def title(self, value):\n        self._cache_put((self.cache_id(), \'title\'), value)\n\n    def _get_outcome(self):\n        if not hasattr(self.__class__, \'_outcome\') or self.__class__._outcome is None:\n            self.__class__._outcome = {}\n        return self.__class__._outcome\n\n    def _callTestMethod(self, testMethod):\n        t = time.time()\n        self._ensure_cache_exists()  # Make sure cache is there.\n        if self._testMethodDoc is not None:\n            self._cache_put((self.cache_id(), \'title\'), self.shortDescriptionStandard())\n\n        self._cache2[(self.cache_id(), \'assert\')] = {}\n        res = testMethod()\n        elapsed = time.time() - t\n        self._get_outcome()[ (self.cache_id(), "return") ] = res\n        self._cache_put((self.cache_id(), "time"), elapsed)\n\n\n    def cache_id(self):\n        c = self.__class__.__qualname__\n        m = self._testMethodName\n        return c, m\n\n    def __init__(self, *args, skip_remote_check=False, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_cache()\n        self._assert_cache_index = 0\n        # Perhaps do a sanity check here to see if the cache is up to date? 
To do that, we must make sure the\n        # cache exists locally.\n        # Find the report class this class is defined within.\n        if skip_remote_check:\n            return\n        import importlib, inspect\n        found_reports = []\n        # print("But do I have report", self._report)\n        # print("I think I am module", self.__module__)\n        # print("Importlib says", importlib.import_module(self.__module__))\n        # This will delegate you to the wrong main clsas when running in grade mode.\n        for name, cls in inspect.getmembers(importlib.import_module(self.__module__), inspect.isclass):\n            # print("checking", cls)\n            if issubclass(cls, Report):\n                for q,_ in cls.questions:\n                    if q == self.__class__:\n                        found_reports.append(cls)\n        if len(found_reports) == 0:\n            pass # This case occurs when the report _grade script is being run.\n            # raise Exception("This question is not a member of a report. Very, very odd.")\n        if len(found_reports) > 1:\n            raise Exception("This question is a member of multiple reports. 
That should not be the case -- don\'t get too creative.")\n        if len(found_reports) > 0:\n            report = found_reports[0]\n            report()._check_remote_versions()\n\n\n    def _ensure_cache_exists(self):\n        if not hasattr(self.__class__, \'_cache\') or self.__class__._cache == None:\n            self.__class__._cache = dict()\n        if not hasattr(self.__class__, \'_cache2\') or self.__class__._cache2 == None:\n            self.__class__._cache2 = dict()\n\n    def _cache_get(self, key, default=None):\n        self._ensure_cache_exists()\n        return self.__class__._cache.get(key, default)\n\n    def _cache_put(self, key, value):\n        self._ensure_cache_exists()\n        self.__class__._cache2[key] = value\n\n    def _cache_contains(self, key):\n        self._ensure_cache_exists()\n        return key in self.__class__._cache\n\n    def get_expected_test_value(self):\n        key = (self.cache_id(), \'assert\')\n        id = self._assert_cache_index\n        cache = self._cache_get(key)\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        return _expected\n\n    def wrap_assert(self, assert_fun, first, *args, **kwargs):\n        key = (self.cache_id(), \'assert\')\n        if not self._cache_contains(key):\n            print("Warning, framework missing", key)\n            self.__class__._cache[key] = {}  # A new dict. We manually insert it because we have to use that the dict is mutable.\n        cache = self._cache_get(key)\n        id = self._assert_cache_index\n        _expected = cache.get(id, f"Key {id} not found in cache; framework files missing. Please run deploy()")\n        if not id in cache:\n            print("Warning, framework missing cache index", key, "id =", id, " - The test will be skipped for now.")\n            if self._setup_answers_mode:\n                _expected = first # Bypass by setting equal to first. 
This is in case multiple self.assertEqualC\'s are run in a row and have to be set.\n\n        # The order of these calls is important. If the method assert fails, we should still store the correct result in cache.\n        cache[id] = first\n        self._cache_put(key, cache)\n        self._assert_cache_index += 1\n        if not self._setup_answers_mode:\n            assert_fun(first, _expected, *args, **kwargs)\n        else:\n            try:\n                assert_fun(first, _expected, *args, **kwargs)\n            except Exception as e:\n                print("Mumble grumble. Cache function failed during class setup. Most likely due to old cache. Re-run deploy to check it pass.", id)\n                print("> first", first)\n                print("> expected", _expected)\n                print(e)\n\n\n    def assertEqualC(self, first, msg=None):\n        self.wrap_assert(self.assertEqual, first, msg)\n\n    def _shape_equal(self, first, second):\n        a1 = np.asarray(first).squeeze()\n        a2 = np.asarray(second).squeeze()\n        msg = None\n        msg = "" if msg is None else msg\n        if len(msg) > 0:\n            msg += "\\n"\n        self.assertEqual(a1.shape, a2.shape, msg=msg + "Dimensions of input data does not agree.")\n        assert(np.all(np.isinf(a1) == np.isinf(a2)))  # Check infinite part.\n        a1[np.isinf(a1)] = 0\n        a2[np.isinf(a2)] = 0\n        diff = np.abs(a1 - a2)\n        return diff\n\n    def assertLinf(self, first, second=None, tol=1e-5, msg=None):\n        """ Test in the L_infinity norm.\n        :param first:\n        :param second:\n        :param tol:\n        :param msg:\n        :return:\n        """\n        if second is None:\n            return self.wrap_assert(self.assertLinf, first, tol=tol, msg=msg)\n        else:\n            diff = self._shape_equal(first, second)\n            np.testing.assert_allclose(first, second, atol=tol)\n            \n            max_diff = max(diff.flat)\n            if 
max_diff >= tol:\n                from unittest.util import safe_repr\n                # msg = f\'{safe_repr(first)} != {safe_repr(second)} : Not equal within tolerance {tol}\'\n                # print(msg)\n                # np.testing.assert_almost_equal\n                # import numpy as np\n                print(f"|first - second|_max = {max_diff} > {tol} ")\n                np.testing.assert_almost_equal(first, second)\n                # If the above fail, make sure to throw an error:\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=f\'Not equal within tolerance {tol}\')\n\n    def assertL2(self, first, second=None, tol=1e-5, msg=None, relative=False):\n        if second is None:\n            return self.wrap_assert(self.assertL2, first, tol=tol, msg=msg, relative=relative)\n        else:\n            # We first test using numpys build-in testing method to see if one coordinate deviates a great deal.\n            # This gives us better output, and we know that the coordinate wise difference is lower than the norm difference.\n            if not relative:\n                np.testing.assert_allclose(first, second, atol=tol)\n            diff = self._shape_equal(first, second)\n            diff = ( ( np.asarray( diff.flatten() )**2).sum() )**.5\n\n            scale = (2/(np.linalg.norm(np.asarray(first).flat) + np.linalg.norm(np.asarray(second).flat)) ) if relative else 1\n            max_diff = diff*scale\n            if max_diff >= tol:\n                msg = "" if msg is None else msg\n                print(f"|first - second|_2 = {max_diff} > {tol} ")\n                # Deletage to numpy. 
Let numpy make nicer messages.\n                np.testing.assert_almost_equal(first, second) # This function does not take a msg parameter.\n                # Make sure to throw an error no matter what.\n                self.assertFalse(max_diff >= tol, msg=f\'Input arrays are not equal within tolerance {tol}\')\n                # self.assertEqual(first, second, msg=msg + f"Not equal within tolerance {tol}")\n\n    @classmethod\n    def _cache_file(cls):\n        return os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__ + ".pkl"\n\n    @classmethod\n    def _artifact_file_for_setUpClass(cls):\n        file = os.path.join(os.path.dirname(cls._cache_file()), ""+cls.__name__+"-setUpClass.json")\n        print("_artifact_file_for_setUpClass(cls): will return", file, "__class__", cls)\n        # cf = os.path.dirname(inspect.getabsfile(cls)) + "/unitgrade_data/" + cls.__name__\n        return file\n\n    def _artifact_file(self):\n        """ File for the artifacts DB (thread safe). This file is optinal. Note that it is a pupdb database file.\n        Note the file is shared between all sub-questions. """\n        return os.path.join(os.path.dirname(self.__class__._cache_file()), \'-\'.join(self.cache_id()) + ".json")\n\n    def _save_cache(self):\n        # get the class name (i.e. what to save to).\n        cfile = self.__class__._cache_file()\n        if not os.path.isdir(os.path.dirname(cfile)):\n            os.makedirs(os.path.dirname(cfile))\n\n        if hasattr(self.__class__, \'_cache2\'):\n            with open(cfile, \'wb\') as f:\n                pickle.dump(self.__class__._cache2, f)\n\n    # But you can also set cache explicitly.\n    def _load_cache(self):\n        if self._cache is not None:  # Cache already loaded. We will not load it twice.\n            return\n            # raise Exception("Loaded cache which was already set. 
What is going on?!")\n        cfile = self.__class__._cache_file()\n        if os.path.exists(cfile):\n            try:\n                with open(cfile, \'rb\') as f:\n                    data = pickle.load(f)\n                self.__class__._cache = data\n            except Exception as e:\n                print("Cache file did not exist:", cfile)\n                print(e)\n        else:\n            print("Warning! data file not found", cfile)\n\n    def _get_coverage_files(self):\n        key = (self.cache_id(), \'coverage\')\n        # CC = None\n        # if self._cache_contains(key):\n        return self._cache_get(key, []) # Anything wrong with the empty list?\n        # return CC\n\n    def _get_hints(self):\n        """\n            This code is run when the test is set up to generate the hints and store them in an artifact file. It may be beneficial to simple compute them beforehand\n            and store them in the local unitgrade pickle file. This code is therefore expected to superceede the alterative code later.\n        """\n        hints = []\n        # print("Getting hint")\n        key = (self.cache_id(), \'coverage\')\n        if self._cache_contains(key):\n            CC = self._cache_get(key)\n            # cl, m = self.cache_id()\n            # print("Getting hint using", CC)\n            # Insert newline to get better formatting.\n            # gprint(\n            #     f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n            for file in CC:\n                rec = CC[file]\n                # gprint(f">   * {file}")\n                for l in rec:\n                    _, comments = CC[file][l]\n                    hint = get_hints(comments)\n\n                    if hint != None:\n                        hints.append((hint, file, l))\n\n        doc = self._testMethodDoc\n        # print("doc", doc)\n        if doc is not None:\n            hint = get_hints(self._testMethodDoc)\n            if hint is not None:\n                hints = [(hint, None, self.cache_id()[1])] + hints\n\n        return hints\n\n    def _feedErrorsToResult(self, result, errors):\n        """ Use this to show hints on test failure.\n        It feeds error to the result -- so if there are errors, they will crop up here\n        """\n        self._error_fed_during_run = errors.copy() # import to copy the error list.\n\n        # result._test._error_fed_during_run = errors.copy()\n\n        if not isinstance(result, UTextResult):\n            er = [e for e, v in errors if v != None]\n            # print("Errors are", errors)\n            if len(er) > 0:\n                hints = []\n                key = (self.cache_id(), \'coverage\')\n                if self._cache_contains(key):\n                    CC = self._cache_get(key)\n                    cl, m = self.cache_id()\n                    # Insert newline to get better formatting.\n                    gprint(f"\\n> An error occured during the test: {cl}.{m}. 
The following files/methods has code in them you are supposed to edit and may therefore be the cause of the problem:")\n                    for file in CC:\n                        rec = CC[file]\n                        gprint(f">   * {file}")\n                        for l in rec:\n                            _, comments = CC[file][l]\n                            hint = get_hints(comments)\n\n                            if hint != None:\n                                hints.append((hint, file, l) )\n                            gprint(f">      - {l}")\n\n                er = er[0]\n\n                doc = er._testMethodDoc\n                # print("doc", doc)\n                if doc is not None:\n                    hint = get_hints(er._testMethodDoc)\n                    if hint is not None:\n                        hints = [(hint, None, self.cache_id()[1] )] + hints\n                if len(hints) > 0:\n                    # print(hints)\n                    for hint, file, method in hints:\n                        s = (f"\'{method.strip()}\'" if method is not None else "")\n                        if method is not None and file is not None:\n                            s += " in "\n                        try:\n                            s += (file.strip() if file is not None else "")\n                            gprint(">")\n                            gprint("> Hints (from " + s + ")")\n                            gprint(textwrap.indent(hint, ">   "))\n                        except Exception as e:\n                            print("Bad stuff in hints. 
")\n                            print(hints)\n        # result._last_errors = errors\n        super()._feedErrorsToResult(result, errors)\n        b = 234\n\n    def startTestRun(self):\n        super().startTestRun()\n\nclass Required:\n    pass\n\nclass ParticipationTest(UTestCase,Required):\n    max_group_size = None\n    students_in_group = None\n    workload_assignment = {\'Question 1\': [1, 0, 0]}\n\n    def test_students(self):\n        pass\n\n    def test_workload(self):\n        pass\n\n# 817, 705\nclass NotebookTestCase(UTestCase):\n    notebook = None\n    _nb = None\n    @classmethod\n    def setUpClass(cls) -> None:\n        with Capturing():\n            cls._nb = importnb.Notebook.load(cls.notebook)\n\n    @property\n    def nb(self):\n        return self.__class__._nb\n\n\nimport hashlib\nimport io\nimport tokenize\nimport numpy as np\nfrom tabulate import tabulate\nfrom datetime import datetime\nimport pyfiglet\nimport unittest\nimport inspect\nimport os\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Example: \nTo run all tests in a report: \n\n> python assignment1_dp.py\n\nTo run only question 2 or question 2.1\n\n> python assignment1_dp.py -q 2\n> python assignment1_dp.py -q 2.1\n\nNote this scripts does not grade your report. 
To grade your report, use:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'-q\', nargs=\'?\', type=str, default=None, help=\'Only evaluate this question (e.g.: -q 2)\')\nparser.add_argument(\'--showexpected\',  action="store_true",  help=\'Show the expected/desired result\')\nparser.add_argument(\'--showcomputed\',  action="store_true",  help=\'Show the answer your code computes\')\nparser.add_argument(\'--unmute\',  action="store_true",  help=\'Show result of print(...) commands in code\')\nparser.add_argument(\'--passall\',  action="store_true",  help=\'Automatically pass all tests. Useful when debugging.\')\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars.\')\n\ndef evaluate_report_student(report, question=None, qitem=None, unmute=None, passall=None, ignore_missing_file=False,\n                            show_tol_err=False, show_privisional=True, noprogress=None,\n                            generate_artifacts=True):\n    args = parser.parse_args()\n    if noprogress is None:\n        noprogress = args.noprogress\n\n    if question is None and args.q is not None:\n        question = args.q\n        if "." 
in question:\n            question, qitem = [int(v) for v in question.split(".")]\n        else:\n            question = int(question)\n\n    if hasattr(report, "computed_answer_file") and not os.path.isfile(report.computed_answers_file) and not ignore_missing_file:\n        raise Exception("> Error: The pre-computed answer file", os.path.abspath(report.computed_answers_file), "does not exist. Check your package installation")\n\n    if unmute is None:\n        unmute = args.unmute\n    if passall is None:\n        passall = args.passall\n\n    results, table_data = evaluate_report(report, question=question, show_progress_bar=not unmute and not noprogress, qitem=qitem,\n                                          verbose=False, passall=passall, show_expected=args.showexpected, show_computed=args.showcomputed,unmute=unmute,\n                                          show_tol_err=show_tol_err,\n                                          generate_artifacts=generate_artifacts)\n\n\n    if question is None and show_privisional:\n        print("Provisional evaluation")\n        tabulate(table_data)\n        table = table_data\n        print(tabulate(table))\n        print(" ")\n\n    fr = inspect.getouterframes(inspect.currentframe())[1].filename\n    gfile = os.path.basename(fr)[:-3] + "_grade.py"\n    if os.path.exists(gfile):\n        print("Note your results have not yet been registered. 
\\nTo register your results, please run the file:")\n        print(">>>", gfile)\n        print("In the same manner as you ran this file.")\n\n\n    return results\n\n\ndef upack(q):\n    # h = zip([(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()])\n    h =[(i[\'w\'], i[\'possible\'], i[\'obtained\']) for i in q.values()]\n    h = np.asarray(h)\n    return h[:,0], h[:,1], h[:,2],\n\nclass SequentialTestLoader(unittest.TestLoader):\n    def getTestCaseNames(self, testCaseClass):\n        test_names = super().getTestCaseNames(testCaseClass)\n        # testcase_methods = list(testCaseClass.__dict__.keys())\n        ls = []\n        for C in testCaseClass.mro():\n            if issubclass(C, unittest.TestCase):\n                ls = list(C.__dict__.keys()) + ls\n        testcase_methods = ls\n        test_names.sort(key=testcase_methods.index)\n        return test_names\n\ndef evaluate_report(report, question=None, qitem=None, passall=False, verbose=False,  show_expected=False, show_computed=False,unmute=False, show_help_flag=True, silent=False,\n                    show_progress_bar=True,\n                    show_tol_err=False,\n                    generate_artifacts=True, # Generate the artifact .json files. 
These are exclusively used by the dashboard.\n                    big_header=True):\n\n    now = datetime.now()\n    if big_header:\n        ascii_banner = pyfiglet.figlet_format("UnitGrade", font="doom")\n        b = "\\n".join( [l for l in ascii_banner.splitlines() if len(l.strip()) > 0] )\n    else:\n        b = "Unitgrade"\n    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")\n    print(b + " v" + __version__ + ", started: " + dt_string+ "\\n")\n    # print("Started: " + dt_string)\n    report._check_remote_versions() # Check (if report.url is present) that remote files exist and are in sync.\n    s = report.title\n    if hasattr(report, "version") and report.version is not None:\n        s += f" version {report.version}"\n    print(s, "(use --help for options)" if show_help_flag else "")\n    # print(f"Loaded answers from: ", report.computed_answers_file, "\\n")\n    table_data = []\n    t_start = time.time()\n    score = {}\n    loader = SequentialTestLoader()\n\n    for n, (q, w) in enumerate(report.questions):\n        q._generate_artifacts = generate_artifacts  # Set whether artifact .json files will be generated.\n        if question is not None and n+1 != question:\n            continue\n        suite = loader.loadTestsFromTestCase(q)\n        qtitle = q.question_title() if hasattr(q, \'question_title\') else q.__qualname__\n        if not report.abbreviate_questions:\n            q_title_print = "Question %i: %s"%(n+1, qtitle)\n        else:\n            q_title_print = "q%i) %s" % (n + 1, qtitle)\n\n        print(q_title_print, end="")\n        q.possible = 0\n        q.obtained = 0\n        # q_ = {} # Gather score in this class.\n        UTextResult.q_title_print = q_title_print # Hacky\n        UTextResult.show_progress_bar = show_progress_bar # Hacky.\n        UTextResult.number = n\n        UTextResult.nL = report.nL\n        UTextResult.unmute = unmute # Hacky as well.\n        UTextResult.setUpClass_time = q._cache.get(((q.__name__, 
\'setUpClass\'), \'time\'), 3) if hasattr(q, \'_cache\') and q._cache is not None else 3\n\n\n        res = UTextTestRunner(verbosity=2, resultclass=UTextResult).run(suite)\n        details = {}\n        for s, msg in res.successes + res.failures + res.errors:\n            # from unittest.suite import _ErrorHolder\n            # from unittest import _Err\n            # if isinstance(s, _ErrorHolder)\n            if hasattr(s, \'_testMethodName\'):\n                key = (q.__name__, s._testMethodName)\n            else:\n                # In case s is an _ErrorHolder (unittest.suite)\n                key = (q.__name__, s.id())\n            # key = (q.__name__, s._testMethodName) # cannot use the cache_id method bc. it is not compatible with plain unittest.\n\n            detail = {}\n            if (s,msg) in res.successes:\n                detail[\'status\'] = "pass"\n            elif (s,msg) in res.failures:\n                detail[\'status\'] = \'fail\'\n            elif (s,msg) in res.errors:\n                detail[\'status\'] = \'error\'\n            else:\n                raise Exception("Status not known.")\n\n            nice_title = s.title\n            detail = {**detail, **msg, \'nice_title\': nice_title}#[\'message\'] = msg\n            details[key] = detail\n\n        # q_[s._testMethodName] = ("pass", None)\n        # for (s,msg) in res.failures:\n        #     q_[s._testMethodName] = ("fail", msg)\n        # for (s,msg) in res.errors:\n        #     q_[s._testMethodName] = ("error", msg)\n        # res.successes[0]._get_outcome()\n\n        possible = res.testsRun\n        obtained = len(res.successes)\n\n        # assert len(res.successes) +  len(res.errors) + len(res.failures) == res.testsRun\n\n        obtained = int(w * obtained * 1.0 / possible ) if possible > 0 else 0\n        score[n] = {\'w\': w, \'possible\': w, \'obtained\': obtained, \'items\': details, \'title\': qtitle, \'name\': q.__name__,\n                   }\n        q.obtained = 
obtained\n        q.possible = possible\n        # print(q._cache)\n        # print(q._covcache)\n        s1 = f" * q{n+1})   Total"\n        s2 = f" {q.obtained}/{w}"\n        print(s1 + ("."* (report.nL-len(s1)-len(s2) )) + s2 )\n        print(" ")\n        table_data.append([f"q{n+1}) Total", f"{q.obtained}/{w}"])\n\n    ws, possible, obtained = upack(score)\n    possible = int( msum(possible) )\n    obtained = int( msum(obtained) ) # Cast to python int\n    report.possible = possible\n    report.obtained = obtained\n    now = datetime.now()\n    dt_string = now.strftime("%H:%M:%S")\n\n    dt = int(time.time()-t_start)\n    minutes = dt//60\n    seconds = dt - minutes*60\n    plrl = lambda i, s: str(i) + " " + s + ("s" if i != 1 else "")\n\n    dprint(first = "Total points at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +")",\n           last=""+str(report.obtained)+"/"+str(report.possible), nL = report.nL)\n\n    # print(f"Completed at "+ dt_string + " (" + plrl(minutes, "minute") + ", "+ plrl(seconds, "second") +"). 
Total")\n\n    table_data.append(["Total", ""+str(report.obtained)+"/"+str(report.possible) ])\n    results = {\'total\': (obtained, possible), \'details\': score}\n    return results, table_data\n\n\ndef python_code_str_id(python_code, strip_comments_and_docstring=True):\n    s = python_code\n\n    if strip_comments_and_docstring:\n        try:\n            s = remove_comments_and_docstrings(s)\n        except Exception as e:\n            print("--"*10)\n            print(python_code)\n            print(e)\n\n    s = "".join([c.strip() for c in s.split()])\n    hash_object = hashlib.blake2b(s.encode())\n    return hash_object.hexdigest()\n\n\ndef file_id(file, strip_comments_and_docstring=True):\n    with open(file, \'r\') as f:\n        # s = f.read()\n        return python_code_str_id(f.read())\n\n\ndef remove_comments_and_docstrings(source):\n    """\n    Returns \'source\' minus comments and docstrings.\n    """\n    io_obj = io.StringIO(source)\n    out = ""\n    prev_toktype = tokenize.INDENT\n    last_lineno = -1\n    last_col = 0\n    for tok in tokenize.generate_tokens(io_obj.readline):\n        token_type = tok[0]\n        token_string = tok[1]\n        start_line, start_col = tok[2]\n        end_line, end_col = tok[3]\n        ltext = tok[4]\n        # The following two conditionals preserve indentation.\n        # This is necessary because we\'re not using tokenize.untokenize()\n        # (because it spits out code with copious amounts of oddly-placed\n        # whitespace).\n        if start_line > last_lineno:\n            last_col = 0\n        if start_col > last_col:\n            out += (" " * (start_col - last_col))\n        # Remove comments:\n        if token_type == tokenize.COMMENT:\n            pass\n        # This series of conditionals removes docstrings:\n        elif token_type == tokenize.STRING:\n            if prev_toktype != tokenize.INDENT:\n        # This is likely a docstring; double-check we\'re not inside an operator:\n           
     if prev_toktype != tokenize.NEWLINE:\n                    # Note regarding NEWLINE vs NL: The tokenize module\n                    # differentiates between newlines that start a new statement\n                    # and newlines inside of operators such as parens, brackes,\n                    # and curly braces.  Newlines inside of operators are\n                    # NEWLINE and newlines that start new code are NL.\n                    # Catch whole-module docstrings:\n                    if start_col > 0:\n                        # Unlabelled indentation means we\'re inside an operator\n                        out += token_string\n                    # Note regarding the INDENT token: The tokenize module does\n                    # not label indentation inside of an operator (parens,\n                    # brackets, and curly braces) as actual indentation.\n                    # For example:\n                    # def foo():\n                    #     "The spaces before this docstring are tokenize.INDENT"\n                    #     test = [\n                    #         "The spaces before this string do not get a token"\n                    #     ]\n        else:\n            out += token_string\n        prev_toktype = token_type\n        last_col = end_col\n        last_lineno = end_line\n    return out\n\nimport textwrap\nimport bz2\nimport pickle\nimport os\nimport zipfile\nimport io\n\ndef bzwrite(json_str, token): # to get around obfuscation issues\n    with getattr(bz2, \'open\')(token, "wt") as f:\n        f.write(json_str)\n\ndef gather_imports(imp):\n    resources = {}\n    m = imp\n    f = m.__file__\n    if hasattr(m, \'__file__\') and not hasattr(m, \'__path__\'):\n        top_package = os.path.dirname(m.__file__)\n        module_import = True\n    else:\n        im = __import__(m.__name__.split(\'.\')[0])\n        if isinstance(im, list):\n            print("im is a list")\n            print(im)\n        # the __path__ attribute *may* be a 
string in some cases. I had to fix this.\n        print("path.:",  __import__(m.__name__.split(\'.\')[0]).__path__)\n        # top_package = __import__(m.__name__.split(\'.\')[0]).__path__._path[0]\n        top_package = __import__(m.__name__.split(\'.\')[0]).__path__[0]\n        module_import = False\n\n    found_hashes = {}\n    # pycode = {}\n    resources[\'pycode\'] = {}\n    zip_buffer = io.BytesIO()\n    with zipfile.ZipFile(zip_buffer, \'w\') as zip:\n        for root, dirs, files in os.walk(top_package):\n            for file in files:\n                if file.endswith(".py"):\n                    fpath = os.path.join(root, file)\n                    v = os.path.relpath(fpath, os.path.dirname(top_package) if not module_import else top_package)\n                    zip.write(fpath, v)\n                    if not fpath.endswith("_grade.py"): # Exclude grade files.\n                        with open(fpath, \'r\') as f:\n                            s = f.read()\n                        found_hashes[v] = python_code_str_id(s)\n                        resources[\'pycode\'][v] = s\n\n    resources[\'zipfile\'] = zip_buffer.getvalue()\n    resources[\'top_package\'] = top_package\n    resources[\'module_import\'] = module_import\n    resources[\'blake2b_file_hashes\'] = found_hashes\n    return resources, top_package\n\n\nimport argparse\nparser = argparse.ArgumentParser(description=\'Evaluate your report.\', epilog="""Use this script to get the score of your report. 
Example:\n\n> python report1_grade.py\n\nFinally, note that if your report is part of a module (package), and the report script requires part of that package, the -m option for python may be useful.\nFor instance, if the report file is in Documents/course_package/report3_complete.py, and `course_package` is a python package, then change directory to \'Documents/` and run:\n\n> python -m course_package.report1\n\nsee https://docs.python.org/3.9/using/cmdline.html\n""", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\'--noprogress\',  action="store_true",  help=\'Disable progress bars\')\nparser.add_argument(\'--autolab\',  action="store_true",  help=\'Show Autolab results\')\n\ndef gather_report_source_include(report):\n    sources = {}\n    # print("")\n    # if not args.autolab:\n    if len(report.individual_imports) > 0:\n        print("By uploading the .token file, you verify the files:")\n        for m in report.individual_imports:\n            print(">", m.__file__)\n        print("Are created/modified individually by you in agreement with DTUs exam rules")\n        report.pack_imports += report.individual_imports\n\n    if len(report.pack_imports) > 0:\n        print("Including files in upload...")\n        for k, m in enumerate(report.pack_imports):\n            nimp, top_package = gather_imports(m)\n            _, report_relative_location, module_import = report._import_base_relative()\n\n            nimp[\'report_relative_location\'] = report_relative_location\n            nimp[\'report_module_specification\'] = module_import\n            nimp[\'name\'] = m.__name__\n            sources[k] = nimp\n            print(f" * {m.__name__}")\n    return sources\n\ndef gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_source=False):\n    # n = report.nL\n    args = parser.parse_args()\n    results, table_data = evaluate_report(report, show_help_flag=False, show_expected=False, show_computed=False, silent=True,\n       
                                   show_progress_bar=not args.noprogress,\n                                          big_header=not args.autolab,\n                                          generate_artifacts=False,\n                                          )\n    print("")\n    sources = {}\n    if not args.autolab:\n        results[\'sources\'] = sources = gather_report_source_include(report)\n\n    token_plain = """\n# This file contains your results. Do not edit its content. Simply upload it as it is. """\n\n    s_include = [token_plain]\n    known_hashes = []\n    cov_files = []\n    use_coverage = True\n    if report._config is not None:\n        known_hashes = report._config[\'blake2b_file_hashes\']\n        for Q, _ in report.questions:\n            use_coverage = use_coverage and isinstance(Q, UTestCase)\n            for key in Q._cache:\n                if len(key) >= 2 and key[1] == "coverage":\n                    for f in Q._cache[key]:\n                        cov_files.append(f)\n\n    for s in sources.values():\n        for f_rel, hash in s[\'blake2b_file_hashes\'].items():\n            if hash in known_hashes and f_rel not in cov_files and use_coverage:\n                print("Skipping", f_rel)\n            else:\n                if token_include_plaintext_source:\n                    s_include.append("#"*3 +" Content of " + f_rel +" " + "#"*3)\n                    s_include.append("")\n                    s_include.append(s[\'pycode\'][f_rel])\n                    s_include.append("")\n\n    if output_dir is None:\n        output_dir = os.getcwd()\n\n    payload_out_base = report.__class__.__name__ + "_handin"\n\n    obtain, possible = results[\'total\']\n    vstring = f"_v{report.version}" if report.version is not None else ""\n    token = "%s_%i_of_%i%s.token"%(payload_out_base, obtain, possible,vstring)\n    token = os.path.normpath(os.path.join(output_dir, token))\n\n    save_token(results, "\\n".join(s_include), token)\n\n    if not 
args.autolab:\n        print("> Testing token file integrity...", sep="")\n        load_token(token)\n        print("Done!")\n        print(" ")\n        print("To get credit for your results, please upload the single unmodified file: ")\n        print(">", token)\n\n\ndef save_token(dictionary, plain_text, file_out):\n    if plain_text is None:\n        plain_text = ""\n    if len(plain_text) == 0:\n        plain_text = "Start token file"\n    plain_text = plain_text.strip()\n    b, b_hash = dict2picklestring(dictionary)\n    b_l1 = len(b)\n    b = "."+b+"."\n    b = "\\n".join( textwrap.wrap(b, 180))\n\n    out = [plain_text, token_sep, f"{b_hash} {b_l1}", token_sep, b]\n    with open(file_out, \'w\') as f:\n        f.write("\\n".join(out))\n\n\n\n\ndef source_instantiate(name, report1_source, payload):\n    # print("Executing sources", report1_source)\n    eval("exec")(report1_source, globals())\n    # print("Loaind gpayload..")\n    pl = pickle.loads(bytes.fromhex(payload))\n    report = eval(name)(payload=pl, strict=True)\n    return report\n\n\n__version__ = "0.1.28.7"\n\nfrom cs108.homework1 import add, reverse_list, linear_regression_weights, linear_predict, foo\nimport time\nimport numpy as np\nimport pickle\nimport os\n# from unitgrade.framework import dash\n\ndef mk_bad():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 100, \'x2\': 300}\n        pickle.dump(d, f)\n\ndef mk_ok():\n    with open(os.path.dirname(__file__)+"/db.pkl", \'wb\') as f:\n        d = {\'x1\': 1, \'x2\': 2}\n        pickle.dump(d, f)\n\nclass Numpy(UTestCase):\n    z = 234\n\n    # def __getattr__(self, item):\n    #     print("hi there ", item)\n    #     return super().__getattr__(item)\n    #\n    # def __getattribute__(self, item):\n    #     print("oh hello sexy. 
", item)\n    #     return super().__getattribute__(item)\n\n    @classmethod_dashboard\n    def setUpClass(cls) -> None:\n        print("Dum di dai, I am running some setup code here.")\n        for i in range(10):\n            print("Hello world", i)\n        print("Set up.") # must be handled seperately.\n        # assert False\n\n    # @cache\n    # def make_primes(self, n):\n    #     return primes(n)\n\n    def test_bad(self):\n        """\n        Hints:\n            * Remember to properly de-indent your code.\n            * Do more stuff which works.\n        """\n        # raise Exception("This ended poorly")\n        # print("Here we go")\n        # return\n        # self.assertEqual(1, 1)\n        with open(os.path.dirname(__file__)+"/db.pkl", \'rb\') as f:\n            d = pickle.load(f)\n        # print(d)\n        # assert False\n        # for i in range(10):\n        from tqdm import tqdm\n        for i in tqdm(range(100)):\n            # print("The current number is", i)\n            time.sleep(.01)\n        self.assertEqual(1, d[\'x1\'])\n        for b in range(10):\n            self.assertEqualC(add(3, b))\n\n\n    def test_weights(self):\n        """\n            Hints:\n            * Try harder!\n            * Check the chapter on linear regression.\n        """\n        n = 3\n        m = 2\n        np.random.seed(5)\n        # from numpy import asdfaskdfj\n        # X = np.random.randn(n, m)\n        # y = np.random.randn(n)\n        foo()\n        # assert 2 == 3\n        # raise Exception("Bad exit")\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertEqual(2, np.random.randint(1000))\n        # self.assertL2(linear_regression_weights(X, y), msg="the message")\n        self.assertEqual(1, 1)\n        # self.assertEqual(1,2)\n        return "THE RESULT OF THE TEST"\n\n\nclass AnotherTest(UTestCase):\n    def test_more(self):\n        self.assertEqual(2,2)\n\n    def test_even_more(self):\n        
self.assertEqual(2,2)\n\nimport cs108\nclass Report2(Report):\n    title = "CS 101 Report 2"\n    questions = [\n        (Numpy, 10), (AnotherTest, 20)\n        ]\n    pack_imports = [cs108]'
+report1_payload = '8004954f040000000000007d94288c054e756d7079947d942868018c0a7365745570436c6173739486948c0474696d65948694473fa258a80000000068018c08746573745f6261649486948c057469746c6594869468076801680786948c066173736572749486947d94284b004b034b014b044b024b054b034b064b044b074b054b084b064b094b074b0a4b084b0b4b094b0c7568016807869468058694473ff0b862c00000006801680786948c08636f7665726167659486947d948c1263733130382f686f6d65776f726b312e7079947d948c0e6465662061646428612c62293a20944b128ca12020202022222220476976656e2074776f206e756d626572732060616020616e642060626020746869732066756e6374696f6e2073686f756c642073696d706c792072657475726e2074686569722073756d3a0a202020203e2061646428612c6229203d20612b620a2020202048696e74733a0a20202020202020202a2052656d656d6265722062617369632061726974686d6574696373210a20202020222222948694737368018c0c746573745f7765696768747394869468098694681a6801681a8694680c86947d946801681a869468058694473f667180000000006801681a8694681286947d948c1263733130382f686f6d65776f726b312e7079947d94288c0b64656620666f6f28293a20944b168c162020202022222220436f6d6d656e742e2020202222229486948c0b6465662062617228293a20944b198c009486947573758c0b416e6f7468657254657374947d9428682d6803869468058694473f3c5c0000000000682d8c09746573745f6d6f7265948694680c86947d94682d6831869468058694473f1c000000000000682d8c0e746573745f6576656e5f6d6f7265948694680c86947d94682d6837869468058694473f2f380000000000758c06636f6e666967947d948c13626c616b6532625f66696c655f686173686573945d94288c806533626432393138326330346430393339383337663665656532383132353463633933316664663433633765663532623139303636636161653463623836343739636131303266323234623536353565313732336462306264383035323931303538313161336561626364396234616366663139366435396332386532666261948c803463383365363937356661646561613036306264366663383765346461373936336433323633373839666235336466373930363632316662333966663432636631303336393330396237383165356662643231333736313365613065336339326336396534393237663765626464613563346431383236343863313263363566948c80303162343364313431
3830383363643939336264313234653433343637373832613035393466313832343039396639376363666231373034313832393737636536356230623537636331393838663565383439336663636361656330646263653834333466386661343539356330373039306265376138326366663536396361946573752e'
+name="Report2"
+
+report = source_instantiate(name, report1_source, report1_payload)
+output_dir = os.path.dirname(__file__)
+gather_upload_to_campusnet(report, output_dir)
\ No newline at end of file
diff --git a/devel/example_devel/students/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock b/devel/example_devel/students/cs108/unitgrade_data/AnotherTest-test_even_more.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/AnotherTest-test_more.json.lock b/devel/example_devel/students/cs108/unitgrade_data/AnotherTest-test_more.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/AnotherTest.pkl b/devel/example_devel/students/cs108/unitgrade_data/AnotherTest.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..24c5438e3054311e2708f4b9ded94f4653a2d38b
Binary files /dev/null and b/devel/example_devel/students/cs108/unitgrade_data/AnotherTest.pkl differ
diff --git a/devel/example_devel/students/cs108/unitgrade_data/Numpy-setUpClass.json.lock b/devel/example_devel/students/cs108/unitgrade_data/Numpy-setUpClass.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/Numpy-test_bad.json.lock b/devel/example_devel/students/cs108/unitgrade_data/Numpy-test_bad.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/Numpy-test_weights.json.lock b/devel/example_devel/students/cs108/unitgrade_data/Numpy-test_weights.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/Numpy.pkl b/devel/example_devel/students/cs108/unitgrade_data/Numpy.pkl
index d19ad9cc2d7645efefec819c07d340a18444d86a..7c5e9c80131e80eb81efb165fc259dddb4775212 100644
Binary files a/devel/example_devel/students/cs108/unitgrade_data/Numpy.pkl and b/devel/example_devel/students/cs108/unitgrade_data/Numpy.pkl differ
diff --git a/devel/example_devel/students/cs108/unitgrade_data/main_config_report_devel.json b/devel/example_devel/students/cs108/unitgrade_data/main_config_report_devel.json
new file mode 100644
index 0000000000000000000000000000000000000000..9fc7ac2ff6f9dba2721fe6695e4811f5628c40be
--- /dev/null
+++ b/devel/example_devel/students/cs108/unitgrade_data/main_config_report_devel.json
@@ -0,0 +1 @@
+{"encoding_scheme": " from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;", "questions": "/Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4AQcAjVdAEABDm5i8vf61cV4iM6ul0/3ELnVhbfZugbR5l8KiW5BMDZ6L8RzKzXjQq+BhcgH2MCJ1m9+eUsGtZHT9kvtqsxPVkTSO3NwafxFXIS31YBI97jN5gntTcpCbg9t7AU0q//uau3ShvdPaadcLZuF/lN1xnW+JdPnvUDda+75aqeRNDn+kY3o89kwQiwQhudeIbf1KsgHaP3ro1KTBEyGIx+RfVFRmLUamFJv+PaDrgWnfH2G15B/Nw97c00KtNTs8nnKN9mzCxdAcg/ipvo1F9Iq6RqoOykxE//nfhuWxpJFQaIvYGmBXBzVSuJKP9HL5/4OuCCZJxyDdDrb+1Ec0nfy8m+hZpw/HNNA9vdgHpEHsYrqqGaOzF49QL+ZOsYdqe46XdqwI0KykT/fsfKyiaF1y8SoMkrom6BX5hE1iK8VDQgB8wzazLGdon9I90kBgi8J4pLlUgKHOG38Ch8RyvZWdpc/B5KMisnFMlRShbamdtZOpBNTVc9zzOTq7XsQqitkOQxL2ZBO/lUcN5LZne6uxCzM5wxtqDUnQVZONK0QzpXWmJUIdmxmDdX8gXlVM6uxSVd1fx0X0usMqy/syHGMltcQcywlRoYx5WZXtuZiwHbWvt05Jy++cClwYPgC01nUeWpLsULl8s0L0nE7M06h4hzhnZ7duF/cI3rUyWNCu8l3G+SkmMtimYrN9dZ3Nqpshvni0LreBYTO7M/BXNi6jBpHAMxLJGyEA/O1f0LAIuqSmoYAAAAASHWmfOv1ibsAAdEEnQgAAFbVjq6xxGf7AgAAAAAEWVo=", "root_dir": "/home/tuhe/Documents/unitgrade_private/devel/example_devel/instructor", "relative_path": "cs108/report_devel.py", "modules": ["cs108", "report_devel"], "token_stub": "cs108/Report2_handin"}
\ No newline at end of file
diff --git a/devel/example_devel/students/cs108/unitgrade_data/main_config_report_devel.json.lock b/devel/example_devel/students/cs108/unitgrade_data/main_config_report_devel.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/devel/example_devel/students/cs108/unitgrade_data/report_devel.json.lock b/devel/example_devel/students/cs108/unitgrade_data/report_devel.json.lock
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/requirements.txt b/requirements.txt
index 50fd70cae5e15aa30e95150c5054ac51e504f089..2a45e4b775c8c76b0d333e0bca33db17a5343241 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 mosspy
 jinja2
-#unitgrade
-setuptools==57 # This is because of pyminifier (mumble, grumble)
-pyminifier
+unitgrade
+# setuptools==57 # This is because of pyminifier (mumble, grumble)
+# pyminifier         # No longer needed; bundled.
diff --git a/setup.py b/setup.py
index e891c68d311d34fcc259323ea428b50aad827912..fea73ea8c94e4f1de994daec49b4c16a22fd21f3 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ setuptools.setup(
     packages=setuptools.find_packages(where="src"),
     include_package_data=True,
     python_requires=">=3.8",
-    install_requires=['numpy', "unitgrade", "codesnipper", 'tabulate', 'tqdm', "pyfiglet",
-                      "colorama", "coverage", 'mosspy', # 'pyminifier',  cannot use pyminifier because 2to3 issue.
+    install_requires=['numpy', "codesnipper", 'tabulate', 'tqdm', "pyfiglet",
+                      "colorama", "coverage", # 'pyminifier',  cannot use pyminifier because 2to3 issue. bundled. will that work?
                       'mosspy'],
 )
diff --git a/src/pyminifier_bundled/__init__.py b/src/pyminifier_bundled/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bde7685bab72d7cf79f29fc085068de49be268bc
--- /dev/null
+++ b/src/pyminifier_bundled/__init__.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+#
+#       Copyright 2013 Liftoff Software Corporation
+#
+# For license information see LICENSE.txt
+
+
+
+# Meta
+__version__ = '2.2'
+__version_info__ = (2, 2)
+__license__ = "GPLv3" # See LICENSE.txt
+__author__ = 'Dan McDougall <daniel.mcdougall@liftoffsoftware.com>'
+
+# TODO: Add the ability to mark variables, functions, classes, and methods for non-obfuscation.
+# TODO: Add the ability to selectively obfuscate identifiers inside strings (for metaprogramming stuff).
+# TODO: Add the ability to use a config file instead of just command line args.
+# TODO: Add the ability to save a file that allows for de-obfuscation later (or at least the ability to debug).
+# TODO: Separate out the individual functions of minification so that they can be chosen selectively like the obfuscation functions.
+
+__doc__ = """\
+**Python Minifier:**  Reduces the size of (minifies) Python code for use on
+embedded platforms.
+
+Performs the following:
+
+    * Removes docstrings.
+    * Removes comments.
+    * Minimizes code indentation.
+    * Removes trailing commas.
+    * Joins multiline pairs of parentheses, braces, and brackets (and removes extraneous whitespace within).
+    * Joins disjointed strings like, ``("some" "disjointed" "string")`` into single strings: ``('''some disjointed string''')
+    * Preserves shebangs and encoding info (e.g. "# -- coding: utf-8 --").
+    * Optionally, produces a bzip2 or gzip-compressed self-extracting python script containing the minified source for ultimate minification. *Added in version 1.4*
+    * Optionally, obfuscates the code using the shortest possible combination of letters and numbers for one or all of class names, function/method names, and variables. The options are ``--obfuscate`` or ``-O`` to obfuscate everything, ``--obfuscate-variables``, ``--obfuscate-functions``, and ``--obfuscate-classes`` to obfuscate things individually (say, if you wanted to keep your module usable by external programs).  *Added in version 2.0*
+    * Optionally, a value may be specified via --replacement-length to set the minimum length of random strings that are used to replace identifier names when obfuscating.
+    * Optionally, if using Python 3, you may specify ``--nonlatin`` to use funky unicode characters when obfuscating. WARNING: This will result in some seriously hard-to-read code! **Tip:** Combine this setting with higher ``--replacement-length`` values to make the output even wackier.  *Added in version 2.0*
+    * Pyminifier can now minify/obfuscate an arbitrary number of Python scripts in one go.  For example, ``pyminifier -O *.py`` will minify and obfuscate all files in the current directory ending in .py.  To prevent issues with using differentiated obfuscated identifiers across multiple files, pyminifier will keep track of what replaces what via a lookup table to ensure foo_module.whatever gets the same replacement across all source files.  *Added in version 2.0*
+    * Optionally, creates an executable zip archive (pyz) containing the minified/obfuscated source script and all implicit (local path) imported modules.  This mechanism automatically figures out which source files to include in the .pyz archive by analyzing the script passed to pyminifier on the command line (listing all the modules your script uses is unnecessary).  This is also the **ultimate** in minification/compression besting both the gzip and bzip2 compression mechanisms with the disadvantage that .pyz files cannot be imported into other Python scripts.  *Added in version 2.0*
+
+Just how much space can be saved by pyminifier?  Here's a comparison:
+
+    * The pyminifier source (all six files) takes up about 164k.
+    * Performing basic minification on all pyminifier source files reduces that to ~104k.
+    * Minification plus obfuscation provides a further reduction to 92k.
+    * Minification plus the base64-encoded gzip trick (--gzip) reduces it to 76k.
+    * Minification plus gzip compression plus obfuscation is also 76k (demonstrating that obfuscation makes no difference when compression algorithms are used).
+    * Using the --pyz option on pyminifier.py creates a ~14k .pyz file that includes all the aforementioned files.
+
+Various examples and edge cases are sprinkled throughout the pyminifier code so
+that it can be tested by minifying itself.  The way to test is thus:
+
+.. code-block:: bash
+
+    $ python __main__.py __main__.py > minified_pyminifier.py
+    $ python minified_pyminifier.py __main__.py > this_should_be_identical.py
+    $ diff minified_pyminifier.py this_should_be_identical.py
+    $
+
+If you get an error executing minified_pyminifier.py or
+``this_should_be_identical.py`` isn't identical to minified_pyminifier.py then
+something is broken.
+
+.. note::
+
+    The test functions below are meaningless.  They only serve as test/edge
+    cases for testing pyminifier.
+"""
+
+# Import built-in modules
+import os, sys, re, io
+from optparse import OptionParser
+# from collections import Iterable
+
+# Import our own modules
+from . import minification
+from . import token_utils
+from . import obfuscate
+from . import compression
+
# Feature flags consumed by pyminify() below.
# NOTE(review): on CPython, sys.version_info is a *subclass* of tuple, so
# ``not isinstance(sys.version_info, tuple)`` is always False and the branch
# below is dead code — py3 stays False and lzma is never imported.  Do not
# "fix" this in isolation: pyminify() tests ``lzma and options.lzma``, and
# options.lzma only exists when __main__.py (which has a twin copy of this
# broken check) actually registered the --lzma option.  A fix must change
# both modules together.
py3 = False
lzma = False
if not isinstance(sys.version_info, tuple):
    if sys.version_info.major == 3:
        py3 = True
        try:
            import lzma
        except ImportError:
            pass
+
# Regexes
# Matches an explicit line continuation: a backslash, an optional trailing
# comment, then the newline.  Rewritten as a raw string: the original
# non-raw literal contained the invalid escape sequence '\s', which raises
# a DeprecationWarning on modern Python (and is slated to become an error).
# The compiled pattern is character-for-character equivalent.
multiline_indicator = re.compile(r'\\(\s*#.*)?\n')
+
+# The test.+() functions below are for testing pyminifier...
def test_decorator(f):
    """Decorator that does nothing (identity) — fixture applied to
    ``test_class.test_function`` to exercise decorator handling."""
    return f
+
def test_reduce_operators():
    """Test the case where an operator such as an open paren starts a line"""
    # The assignment target is deliberately parenthesized so the line begins
    # with "(" — that is the edge case this fixture exists to exercise.
    (a, b) = 1, 2 # The indentation level should be preserved
    pass
+
def test_empty_functions():
    """
    This is a test function.
    This should be replaced with 'def test_empty_functions(): pass'
    """
    # Intentionally empty: the docstring is the whole body, so docstring
    # removal must emit a 'pass' (per the docstring above) to stay valid.
+
class test_class(object):
    # Fixture: a decorator applied at method (indented) level, plus a
    # single-quoted one-line class docstring.
    "Testing indented decorators"

    @test_decorator
    def test_function(self):
        pass
+
def test_function():
    """
    This function encapsulates the edge cases to prevent them from invading the
    global namespace.
    """
    # NOTE: ``imaginary_function`` below is undefined on purpose; this fixture
    # is meant to be tokenized/minified, not called (calling it would raise
    # NameError at the imaginary_function line).
    # This tests method obfuscation:
    method_obfuscate = test_class()
    method_obfuscate.test_function()
    foo = ("The # character in this string should " # This comment
           "not result in a syntax error") # ...and this one should go away
    test_multi_line_list = [
        'item1',
        'item2',
        'item3'
    ]
    test_multi_line_dict = {
        'item1': 1,
        'item2': 2,
        'item3': 3
    }
    # It may seem strange but the code below tests our docstring removal code.
    test_string_inside_operators = imaginary_function(
        "This string was indented but the tokenizer won't see it that way."
    ) # To understand how this could mess up docstring removal code see the
    # minification.minification.remove_comments_and_docstrings() function
    # starting at this line:
    #     "elif token_type == tokenize.STRING:"
    # This tests remove_extraneous_spaces():
    this_line_has_leading_indentation    = '''<--That extraneous space should be
                                              removed''' # But not these spaces
+
def is_iterable(obj):
    """
    Return ``True`` if *obj* is iterable but *not* if *obj* is a string,
    bytes, or a bytearray.

    Bug fix: the original body referenced the bare name ``Iterable`` whose
    import (``from collections import Iterable``) had been commented out at
    the top of this module, so any call with a non-string iterable raised
    ``NameError``.  The ABC is imported locally from ``collections.abc`` —
    its correct home since Python 3.3, and the only one after 3.10, when the
    ``collections.Iterable`` alias was removed.
    """
    from collections.abc import Iterable  # local: keeps module imports untouched
    if isinstance(obj, (str, bytes, bytearray)):
        return False
    return isinstance(obj, Iterable)
+
def pyminify(options, files):
    """
    Given an *options* object (from `optparse.OptionParser` or similar),
    performs minification and/or obfuscation on the given *files* (any iterable
    containing file paths) based on said *options*.

    All accepted options can be listed by running ``python __main__.py -h`` or
    examining the :py:func:`__init__.main` function.

    Side effects: writes output to ``options.destdir`` (multi-file path),
    ``options.outfile`` or stdout (single-file path), prints size statistics,
    and calls ``sys.exit`` on usage errors.  Returns ``None``.
    """
    global name_generator
    if not hasattr(files, '__iter__'): # is_iterable(files):
        print(
            "Error: The 'files' argument must be a list, tuple, etc of files.  "
            "Strings and bytes won't work.")
        sys.exit(1)
    if options.pyz:
        # Check to make sure we were only passed one script (only one at a time)
        if len(files) > 1:
            print("ERROR: The --pyz option only works with one python file at "
                  "a time.")
            print("(Dependencies will be automagically included in the "
                  "resulting .pyz)")
            sys.exit(1)
        # Make our .pyz:
        compression.zip_pack(files, options)
        return None # Make sure we don't do anything else
    # Read in our prepend text (if any)
    prepend = None
    if options.prepend:
        try:
            # NOTE(review): handle never closed explicitly (relies on GC).
            prepend = open(options.prepend).read()
        except Exception as err:
            print("Error reading %s:" % options.prepend)
            print(err)

    obfuscations = (options.obfuscate, options.obf_classes,
                    options.obf_functions, options.obf_variables,
                    options.obf_builtins, options.obf_import_methods)

    # Automatically enable obfuscation if --nonlatin (implied if no explicit
    # obfuscation is stated)
    if options.use_nonlatin and not any(obfuscations):
        options.obfuscate = True
    if len(files) > 1: # We're dealing with more than one file
        name_generator = None # So we can tell if we need to obfuscate
        if any(obfuscations):
            # Put together that will be used for all obfuscation functions:
            identifier_length = int(options.replacement_length)
            if options.use_nonlatin:
                if sys.version_info[0] == 3:
                    name_generator = obfuscate.obfuscation_machine(
                        use_unicode=True, identifier_length=identifier_length
                    )
                else:
                    print(
                        "ERROR: You can't use nonlatin characters without Python 3")
                    sys.exit(2)
            else:
                name_generator = obfuscate.obfuscation_machine(
                    identifier_length=identifier_length)
            # Shared replacement table so identifiers map consistently
            # across all of the files processed in this run.
            table =[{}]
        cumulative_size = 0 # For size reduction stats
        cumulative_new = 0 # Ditto
        for sourcefile in files:
            # Record how big the file is so we can compare afterwards
            filesize = os.path.getsize(sourcefile)
            cumulative_size += filesize
            # Get the module name from the path
            module = os.path.split(sourcefile)[1]
            module = ".".join(module.split('.')[:-1])
            # NOTE(review): handle never closed; no explicit encoding either
            # (platform default) — TODO confirm this is acceptable here.
            source = open(sourcefile).read()
            tokens = token_utils.listified_tokenizer(source)
            if not options.nominify: # Perform minification
                source = minification.minify(tokens, options)
            # Have to re-tokenize for obfucation (it is quick):
            tokens = token_utils.listified_tokenizer(source)
            # Perform obfuscation if any of the related options were set
            # (name_generator is only non-None when any(obfuscations) above,
            # which is also the only path that defines ``table``).
            if name_generator:
                obfuscate.obfuscate(
                    module,
                    tokens,
                    options,
                    name_generator=name_generator,
                    table=table
                )
            # Convert back to text
            result = ''
            if prepend:
                result += prepend
            result += token_utils.untokenize(tokens)
            # Compress it if we were asked to do so
            if options.bzip2:
                result = compression.bz2_pack(result)
            elif options.gzip:
                result = compression.gz_pack(result)
            elif lzma and options.lzma:
                result = compression.lzma_pack(result)
            result += (
                "# Created by pyminifier "
                "(https://github.com/liftoff/pyminifier)\n")
            # Either save the result to the output file or print it to stdout
            if not os.path.exists(options.destdir):
                os.mkdir(options.destdir)
            # Need the path where the script lives for the next steps:
            filepath = os.path.split(sourcefile)[1]
            path = options.destdir + '/' + filepath # Put everything in destdir
            # NOTE(review): unlike the single-file branch below, this writes
            # with the platform default encoding rather than utf-8.
            f = open(path, 'w')
            f.write(result)
            f.close()
            new_filesize = os.path.getsize(path)
            cumulative_new += new_filesize
            percent_saved = round((float(new_filesize) / float(filesize)) * 100, 2) if float(filesize)!=0 else 0
            print((
                "{sourcefile} ({filesize}) reduced to {new_filesize} bytes "
                "({percent_saved}% of original size)").format(**locals()))
        # NOTE(review): divides by cumulative_size with no zero guard — would
        # raise ZeroDivisionError if every input file were empty.
        p_saved = round(
            (float(cumulative_new) / float(cumulative_size) * 100), 2)
        print("Overall size reduction: {0}% of original size".format(p_saved))
    else:
        # Get the module name from the path
        _file = files[0]
        module = os.path.split(_file)[1]
        module = ".".join(module.split('.')[:-1])
        filesize = os.path.getsize(_file)
        # NOTE(review): handle never closed (relies on GC).
        source = open(_file).read()
        # Convert the tokens from a tuple of tuples to a list of lists so we can
        # update in-place.
        tokens = token_utils.listified_tokenizer(source)
        if not options.nominify: # Perform minification
            source = minification.minify(tokens, options)
            # Convert back to tokens in case we're obfuscating
            tokens = token_utils.listified_tokenizer(source)
        # Perform obfuscation if any of the related options were set
        if options.obfuscate or options.obf_classes or options.obf_functions \
                or options.obf_variables or options.obf_builtins \
                or options.obf_import_methods:
            identifier_length = int(options.replacement_length)
            name_generator = obfuscate.obfuscation_machine(
                identifier_length=identifier_length)
            obfuscate.obfuscate(module, tokens, options)
        # Convert back to text
        result = ''
        if prepend:
            result += prepend
        result += token_utils.untokenize(tokens)
        # Compress it if we were asked to do so
        if options.bzip2:
            result = compression.bz2_pack(result)
        elif options.gzip:
            result = compression.gz_pack(result)
        elif lzma and options.lzma:
            result = compression.lzma_pack(result)
        result += (
            "# Created by pyminifier "
            "(https://github.com/liftoff/pyminifier)\n")
        # Either save the result to the output file or print it to stdout
        if options.outfile:
            f = io.open(options.outfile, 'w', encoding='utf-8')
            f.write(result)
            f.close()
            new_filesize = os.path.getsize(options.outfile)
            # NOTE(review): no zero-size guard here, unlike the multi-file
            # branch above — a zero-byte input raises ZeroDivisionError.
            percent_saved = round(float(new_filesize)/float(filesize) * 100, 2)
            print((
                "{_file} ({filesize}) reduced to {new_filesize} bytes "
                "({percent_saved}% of original size)".format(**locals())))
        else:
            print(result)
diff --git a/src/pyminifier_bundled/__main__.py b/src/pyminifier_bundled/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3773d529bdbaa663a279e32742bf11c178595d2
--- /dev/null
+++ b/src/pyminifier_bundled/__main__.py
@@ -0,0 +1,189 @@
+from optparse import OptionParser
+import sys
+
+from . import pyminify
+from . import __version__
+
# Feature flags gating the optional --lzma command-line option below.
# NOTE(review): on CPython, sys.version_info is a tuple *subclass*, so
# ``not isinstance(sys.version_info, tuple)`` is always False — py3 stays
# False, lzma is never imported, and get_parser() therefore never registers
# --lzma.  Any fix must be coordinated with the identical check in
# __init__.py, because pyminify() evaluates ``lzma and options.lzma`` and
# would AttributeError if one module's flag is fixed without the other.
py3 = False
lzma = False
if not isinstance(sys.version_info, tuple):
    if sys.version_info.major == 3:
        py3 = True
        try:
            import lzma
        except ImportError:
            pass
+
def get_parser():
    """
    Build and return the ``OptionParser`` for the pyminifier command line.

    Option flags, destinations, defaults, help text, and registration order
    are identical to the historical hand-written version; they are simply
    declared in a table and registered in one loop, so ``--help`` output is
    unchanged.  The ``--lzma`` option is only registered when the module-level
    ``lzma`` flag is truthy.
    """
    usage = '%prog [options] "<input file>"'
    if '__main__.py' in sys.argv[0]: # python -m pyminifier
        usage = 'pyminifier [options] "<input file>"'
    parser = OptionParser(usage=usage, version=__version__)
    parser.disable_interspersed_args()
    # (flag strings, add_option keyword arguments), in registration order.
    specs = [
        (("-o", "--outfile"), {
            "dest": "outfile",
            "default": None,
            "help": "Save output to the given file.",
            "metavar": "<file path>",
        }),
        (("-d", "--destdir"), {
            "dest": "destdir",
            "default": "./minified",
            "help": ("Save output to the given directory. "
                     "This option is required when handling multiple files. "
                     "Defaults to './minified' and will be created if not present. "),
            "metavar": "<file path>",
        }),
        (("--nominify",), {
            "action": "store_true",
            "dest": "nominify",
            "default": False,
            "help": "Don't bother minifying (only used with --pyz).",
        }),
        (("--use-tabs",), {
            "action": "store_true",
            "dest": "tabs",
            "default": False,
            "help": "Use tabs for indentation instead of spaces.",
        }),
        (("--bzip2",), {
            "action": "store_true",
            "dest": "bzip2",
            "default": False,
            "help": ("bzip2-compress the result into a self-executing python script.  "
                     "Only works on stand-alone scripts without implicit imports."),
        }),
        (("--gzip",), {
            "action": "store_true",
            "dest": "gzip",
            "default": False,
            "help": ("gzip-compress the result into a self-executing python script.  "
                     "Only works on stand-alone scripts without implicit imports."),
        }),
    ]
    if lzma:
        specs.append((("--lzma",), {
            "action": "store_true",
            "dest": "lzma",
            "default": False,
            "help": ("lzma-compress the result into a self-executing python script.  "
                     "Only works on stand-alone scripts without implicit imports."),
        }))
    specs.extend([
        (("--pyz",), {
            "dest": "pyz",
            "default": None,
            "help": ("zip-compress the result into a self-executing python script. "
                     "This will create a new file that includes any necessary implicit"
                     " (local to the script) modules.  Will include/process all files "
                     "given as arguments to pyminifier.py on the command line."),
            "metavar": "<name of archive>.pyz",
        }),
        (("-O", "--obfuscate"), {
            "action": "store_true",
            "dest": "obfuscate",
            "default": False,
            "help": ("Obfuscate all function/method names, variables, and classes.  "
                     "Default is to NOT obfuscate."),
        }),
        (("--obfuscate-classes",), {
            "action": "store_true",
            "dest": "obf_classes",
            "default": False,
            "help": "Obfuscate class names.",
        }),
        (("--obfuscate-functions",), {
            "action": "store_true",
            "dest": "obf_functions",
            "default": False,
            "help": "Obfuscate function and method names.",
        }),
        (("--obfuscate-variables",), {
            "action": "store_true",
            "dest": "obf_variables",
            "default": False,
            "help": "Obfuscate variable names.",
        }),
        (("--obfuscate-import-methods",), {
            "action": "store_true",
            "dest": "obf_import_methods",
            "default": False,
            # Help text (including the historical "mouled" typo) kept verbatim.
            "help": "Obfuscate globally-imported mouled methods (e.g. 'Ag=re.compile').",
        }),
        (("--obfuscate-builtins",), {
            "action": "store_true",
            "dest": "obf_builtins",
            "default": False,
            "help": "Obfuscate built-ins (i.e. True, False, object, Exception, etc).",
        }),
        (("--replacement-length",), {
            "dest": "replacement_length",
            "default": 1,
            "help": ("The length of the random names that will be used when obfuscating "
                     "identifiers."),
            "metavar": "1",
        }),
        (("--nonlatin",), {
            "action": "store_true",
            "dest": "use_nonlatin",
            "default": False,
            "help": ("Use non-latin (unicode) characters in obfuscation (Python 3 only)."
                     "  WARNING: This results in some SERIOUSLY hard-to-read code."),
        }),
        (("--prepend",), {
            "dest": "prepend",
            "default": None,
            "help": ("Prepend the text in this file to the top of our output.  "
                     "e.g. A copyright notice."),
            "metavar": "<file path>",
        }),
    ])
    for flags, kwargs in specs:
        parser.add_option(*flags, **kwargs)
    return parser
+
def main():
    """Console entry point: parse ``sys.argv`` and hand off to ``pyminify``.

    Prints usage and exits with status 2 when no input files are given.
    """
    cli = get_parser()
    opts, paths = cli.parse_args()
    if paths:
        pyminify(opts, paths)
    else:
        cli.print_help()
        sys.exit(2)
+
+
+if __name__ == "__main__":
+    main()
+
+
def runpym(sysargs):
    """Programmatic twin of ``main`` that parses *sysargs* instead of
    ``sys.argv``; prints usage and exits with status 2 when *sysargs*
    names no input files."""
    cli = get_parser()
    opts, paths = cli.parse_args(sysargs)
    if paths:
        pyminify(opts, paths)
    else:
        cli.print_help()
        sys.exit(2)
\ No newline at end of file
diff --git a/src/pyminifier_bundled/analyze.py b/src/pyminifier_bundled/analyze.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1968d87bd57d81feec5bfd33e84659faaea970b
--- /dev/null
+++ b/src/pyminifier_bundled/analyze.py
@@ -0,0 +1,430 @@
+# -*- coding: utf-8 -*-
+
+__doc__ = """\
+A module of useful functions for analyzing Python code.
+"""
+
+# Import builtins
+import os, sys, re, tokenize, keyword
+try:
+    import cStringIO as io
+except ImportError: # Ahh, Python 3
+    import io
+
+# Globals
# True when running under Python 3.  Bug fix: the previous guard
# (``not isinstance(sys.version_info, tuple)``) never fired on CPython,
# because sys.version_info *is* a tuple subclass, which left ``py3``
# permanently False even on Python 3 interpreters.
py3 = sys.version_info[0] >= 3

# Raw strings so the \!, \s, \w escapes reach the regex engine verbatim
# instead of being invalid string-literal escape sequences (DeprecationWarning
# on modern Python).  Patterns are character-for-character equivalent.
shebang = re.compile(r'^#\!.*$')
encoding = re.compile(r".*coding[:=]\s*([-\w.]+)")
+# __builtins__ is different for every module so we need a hard-coded list:
builtins = [  # NOTE: this module-level name shadows the stdlib ``builtins`` module.
    # Exceptions and warnings.  Includes Python-2-only names such as
    # StandardError — presumably so Py2 sources can be analyzed too; confirm.
    'ArithmeticError',
    'AssertionError',
    'AttributeError',
    'BaseException',
    'BufferError',
    'BytesWarning',
    'DeprecationWarning',
    'EOFError',
    'Ellipsis',
    'EnvironmentError',
    'Exception',
    'False',
    'FloatingPointError',
    'FutureWarning',
    'GeneratorExit',
    'IOError',
    'ImportError',
    'ImportWarning',
    'IndentationError',
    'IndexError',
    'KeyError',
    'KeyboardInterrupt',
    'LookupError',
    'MemoryError',
    'NameError',
    'None',
    'NotImplemented',
    'NotImplementedError',
    'OSError',
    'OverflowError',
    'PendingDeprecationWarning',
    'ReferenceError',
    'RuntimeError',
    'RuntimeWarning',
    'StandardError',
    'StopIteration',
    'SyntaxError',
    'SyntaxWarning',
    'SystemError',
    'SystemExit',
    'TabError',
    'True',
    'TypeError',
    'UnboundLocalError',
    'UnicodeDecodeError',
    'UnicodeEncodeError',
    'UnicodeError',
    'UnicodeTranslateError',
    'UnicodeWarning',
    'UserWarning',
    'ValueError',
    'Warning',
    'ZeroDivisionError',
    # Interpreter/IPython-injected and dunder names.
    '__IPYTHON__',
    '__IPYTHON__active',
    '__debug__',
    '__doc__',
    '__import__',
    '__name__',
    '__package__',
    # Builtin callables and constants.  Again includes Py2-only names
    # (apply, basestring, buffer, cmp, coerce, execfile, file, intern,
    # long, raw_input, reduce, reload, unichr, unicode, xrange).
    'abs',
    'all',
    'any',
    'apply',
    'basestring',
    'bin',
    'bool',
    'buffer',
    'bytearray',
    'bytes',
    'callable',
    'chr',
    'classmethod',
    'cmp',
    'coerce',
    'compile',
    'complex',
    'copyright',
    'credits',
    'delattr',
    'dict',
    'dir',
    'divmod',
    'dreload',
    'enumerate',
    'eval',
    'execfile',
    'exit',
    'file',
    'filter',
    'float',
    'format',
    'frozenset',
    'getattr',
    'globals',
    'hasattr',
    'hash',
    'help',
    'hex',
    'id',
    'input',
    'int',
    'intern',
    'ip_set_hook',
    'ipalias',
    'ipmagic',
    'ipsystem',
    'isinstance',
    'issubclass',
    'iter',
    'jobs',
    'len',
    'license',
    'list',
    'locals',
    'long',
    'map',
    'max',
    'min',
    'next',
    'object',
    'oct',
    'open',
    'ord',
    'pow',
    'print',
    'property',
    'quit',
    'range',
    'raw_input',
    'reduce',
    'reload',
    'repr',
    'reversed',
    'round',
    'set',
    'setattr',
    'slice',
    'sorted',
    'staticmethod',
    'str',
    'sum',
    'super',
    'tuple',
    'type',
    'unichr',
    'unicode',
    'vars',
    'xrange',
    'zip'
]

# Every identifier the analyzer must treat as reserved: language keywords
# plus the hard-coded builtin names above.
reserved_words = keyword.kwlist + builtins
+
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    current_func = False  # Name of the 'def' line being scanned, or False
    for position, token in enumerate(tokens):
        tok_type, tok_string = token[0], token[1]
        if tok_type == tokenize.NEWLINE:
            current_func = False  # Statement ended; def line is over
        if tok_type == tokenize.NAME:
            if tok_string == "def":
                # The NAME right after 'def' is the function name
                current_func = tokens[position + 1][1]
                keyword_args[current_func] = []
            elif current_func:
                # A NAME immediately followed by '=' is a keyword argument
                if tokens[position + 1][1] == '=':
                    keyword_args[current_func].append(tok_string)
    return keyword_args
+
def enumerate_imports(tokens):
    """
    Iterates over *tokens* and returns a list of all imported modules.

    .. note:: This ignores imports using the 'as' and 'from' keywords.
    """
    found = []
    in_import = False  # Inside an 'import ...' statement
    in_from = False    # Inside a 'from ... import ...' statement
    for position, token in enumerate(tokens):
        tok_type, tok_string = token[0], token[1]
        if tok_type == tokenize.NEWLINE:
            # End of statement: clear both flags
            in_import = False
            in_from = False
        elif tok_string == "import":
            in_import = True
        elif tok_string == "from":
            in_from = True
        elif in_import and tok_type == tokenize.NAME:
            # Skip aliased names ('as'), 'from' imports and reserved words
            if tokens[position + 1][1] != 'as' and not in_from:
                if tok_string not in reserved_words:
                    if tok_string not in found:
                        found.append(tok_string)
    return found
+
def enumerate_global_imports(tokens):
    """
    Returns a list of all globally imported modules (skips modules imported
    inside of classes, methods, or functions).  Example::

        >>> enumerate_global_modules(tokens)
        ['sys', 'os', 'tokenize', 're']

    .. note::

        Does not enumerate imports using the 'from' or 'as' keywords.
    """
    imported_modules = []   # Result (may contain dotted 'pkg.mod' names)
    import_line = False     # True while scanning an 'import ...' statement
    from_import = False     # True while scanning a 'from ... import ...'
    parent_module = ""      # Holds 'pkg.' while a dotted import is parsed
    function_count = 0      # Count of def/class headers seen so far
    indentation = 0         # Current INDENT depth per the tokenizer
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.INDENT:
            indentation += 1
        elif token_type == tokenize.DEDENT:
            indentation -= 1
        elif token_type == tokenize.NEWLINE:
            # Statement ended; any import line is finished
            import_line = False
            from_import = False
        elif token_type == tokenize.NAME:
            if token_string in ["def", "class"]:
                function_count += 1
            # NOTE(review): the indentation/function_count bookkeeping below
            # is a heuristic for "have we returned to module level?".  It
            # assumes every def/class introduces exactly one INDENT --
            # confirm against one-line defs before relying on it.
            if indentation == function_count - 1:
                function_count -= 1
            elif function_count >= indentation:
                if token_string == "import":
                    import_line = True
                elif token_string == "from":
                    from_import = True
                elif import_line:
                    if token_type == tokenize.NAME \
                        and tokens[index+1][1] != 'as':
                        if not from_import \
                            and token_string not in reserved_words:
                            if token_string not in imported_modules:
                                if tokens[index+1][1] == '.': # module.module
                                    parent_module = token_string + '.'
                                else:
                                    if parent_module:
                                        module_string = (
                                            parent_module + token_string)
                                        imported_modules.append(module_string)
                                        parent_module = ''
                                    else:
                                        imported_modules.append(token_string)

    return imported_modules
+
# TODO: Finish this (even though it isn't used):
def enumerate_dynamic_imports(tokens):
    """
    Returns a flat list of modules named on import lines in *tokens*.

    .. note::

        Unfinished and unused.  The original docstring promised a dict of
        ``{<func or class name>: [<modules>]}`` but the implementation
        collects a flat list; the list behavior is kept for compatibility.
    """
    imported_modules = []
    import_line = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
        elif token_string == "import":
            # BUG FIX: the original wrapped tokens[index-1] in
            # try/except IndexError, but Python's negative indexing means
            # tokens[-1] never raises -- an 'import' that was the very
            # first token got compared against the *last* token and was
            # silently ignored.  Check the index explicitly instead.
            if index == 0 or tokens[index-1][0] == tokenize.NEWLINE:
                import_line = True
        elif import_line:
            if token_type == tokenize.NAME and tokens[index+1][1] != 'as':
                if token_string not in reserved_words:
                    if token_string not in imported_modules:
                        imported_modules.append(token_string)
    return imported_modules
+
def enumerate_method_calls(tokens, modules):
    """
    Returns a list of all object (not module) method calls in the given tokens.

    *modules* is expected to be a list of all global modules imported into the
    source code we're working on.

    For example:
        >>> enumerate_method_calls(tokens)
        ['re.compile', 'sys.argv', 'f.write']
    """
    calls = []
    for position, token in enumerate(tokens):
        if token[0] != tokenize.NAME:
            continue
        if tokens[position + 1][1] != '(':
            continue  # Not a call at all
        if tokens[position - 1][1] != '.':
            continue  # Not attached to an object or module
        owner = tokens[position - 2][1]
        # Skip literal/bracket owners and calls attached to known modules
        if owner in ['""', "''", ']', ')', '}']:
            continue
        if owner in modules:
            continue
        dotted = "%s.%s" % (owner, token[1])
        if dotted not in calls:
            calls.append(dotted)
    return calls
+
def enumerate_builtins(tokens):
    """
    Returns a list of all the builtins being used in *tokens*.
    """
    used = []
    # 'print' is a statement in Python 2 and can't be swapped out there;
    # under Python 3 it is a regular builtin and may be counted.
    skip_list = [] if py3 else ['print']
    for position, token in enumerate(tokens):
        name = token[1]
        if name not in builtins:
            continue
        if name in skip_list:
            continue
        if name.startswith('__'):
            continue  # Don't count magic funcs
        # Skip attribute access (obj.len) and assignments (len = ...)
        if tokens[position - 1][1] == '.' or tokens[position + 1][1] == '=':
            continue
        if name not in used:
            used.append(name)
    return used
+
def enumerate_import_methods(tokens):
    """
    Returns a list of imported module methods (such as re.compile) inside
    *tokens*.
    """
    # Stand-in token used when we run off the end of the list:
    fake_newline = (54, '\n', (1, 1), (1, 2), '#\n')
    modules = enumerate_global_imports(tokens)
    methods = []
    for module in modules:
        for position, token in enumerate(tokens):
            if token[1] != module:
                continue
            try:
                following = tokens[position + 1]
            except IndexError:  # Last token, no biggie
                following = fake_newline
            if following[1] != '.':
                continue  # Plain reference, not a method access
            try:
                method_tok = tokens[position + 2]
            except IndexError:  # Pretend it is a newline
                method_tok = fake_newline
            dotted = "%s.%s" % (token[1], method_tok[1])
            if dotted not in methods:
                methods.append(dotted)
    return methods
+
def enumerate_local_modules(tokens, path):
    """
    Returns a list of modules inside *tokens* that are local to *path*.

    **Note:**  Will recursively look inside *path* for said modules.
    """
    # Need the complete set of imported names before scanning the filesystem
    imported = enumerate_imports(tokens)
    local_modules = []
    top_dir = ""
    for root, dirs, files in os.walk(path):
        if not top_dir:
            # The first directory walked is the parent of everything below
            top_dir = os.path.split(root)[1]
        for filename in files:
            if not filename.endswith('.py'):
                continue
            module_name = filename[:-3]  # Strip the .py extension
            # NOTE(review): posix separator assumed below -- verify on
            # Windows where os.walk yields backslash-separated roots.
            dotted = root.split(top_dir)[1].replace('/', '.').lstrip('.')
            if dotted:
                module = "%s.%s" % (dotted, module_name)
            else:
                module = module_name
            if module not in imported:
                local_modules.append(module)
    return local_modules
+
def get_shebang(tokens):
    """
    Returns the shebang string in *tokens* if it exists.  None if not.
    """
    # A shebang can only live on the first physical line, and that line is
    # always carried by one of the first four tokens:
    for token in tokens[:4]:
        source_line = token[4]
        if shebang.match(source_line):  # e.g. '#!/usr/bin/env python'
            return source_line
diff --git a/src/pyminifier_bundled/compression.py b/src/pyminifier_bundled/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5d2ab663c08364b2a15eee0b85ef0e327604e50
--- /dev/null
+++ b/src/pyminifier_bundled/compression.py
@@ -0,0 +1,284 @@
+# -*- coding: utf-8 -*-
+
+__doc__ = """\
+compression.py - A module providing functions to turn a python script into a
+self-executing archive in a few different formats...
+
+**gz_pack format:**
+
+    - Typically provides better compression than bzip2 (for Python scripts).
+    - Scripts compressed via this method can still be imported as modules.
+    - The resulting binary data is base64-encoded which isn't optimal compression.
+
+**bz2_pack format:**
+
+    - In some cases may provide better compression than gzip.
+    - Scripts compressed via this method can still be imported as modules.
+    - The resulting binary data is base64-encoded which isn't optimal compression.
+
+**lzma_pack format:**
+
+    - In some cases may provide better compression than bzip2.
+    - Scripts compressed via this method can still be imported as modules.
+    - The resulting binary data is base64-encoded which isn't optimal compression.
+
+The gz_pack, bz2_pack, and lzma_pack formats only work on individual .py
+files.  To pack a number of files at once using this method use the
+``--destdir`` command line option:
+
+.. code-block:: shell
+
+    $ pyminifier --gzip --destdir=/tmp/minified *.py
+
+**zip_pack format:**
+
+    - Provides the best compression of Python scripts.
+    - Resulting script cannot be imported as a module.
+    - Any required modules that are local (implied path) will be automatically included in the archive.
+"""
+
+# Import standard library modules
+import os, sys, tempfile, shutil
+
+# Import our own supporting modules
+from . import analyze, token_utils, minification, obfuscate
+
# NOTE: ``sys.version_info`` is a tuple subclass on every supported Python,
# so the original ``if not isinstance(sys.version_info, tuple)`` guard never
# executed and ``py3`` stayed False even under Python 3.  Check the major
# version directly.
py3 = sys.version_info[0] == 3
+
def bz2_pack(source):
    """
    Returns 'source' as a bzip2-compressed, self-extracting python script.

    .. note::

        This method uses up more space than the zip_pack method but it has the
        advantage in that the resulting .py file can still be imported into a
        python program.
    """
    import bz2, base64
    header = ""
    # Keep an existing shebang at the top (encodings don't matter here)
    shebang_line = source.split('\n')[0]
    if analyze.shebang.match(shebang_line):
        if py3 and shebang_line.rstrip().endswith('python'):
            # Upgrade '#!/usr/bin/env python' to point at python3
            shebang_line = shebang_line.rstrip() + '3'
        header = shebang_line + '\n'
    payload = base64.b64encode(
        bz2.compress(source.encode('utf-8'))).decode('utf-8')
    return (
        header
        + 'import bz2, base64\n'
        + "exec(bz2.decompress(base64.b64decode('"
        + payload
        + "')))\n"
    )
+
def gz_pack(source):
    """
    Returns 'source' as a gzip-compressed, self-extracting python script.

    .. note::

        This method uses up more space than the zip_pack method but it has the
        advantage in that the resulting .py file can still be imported into a
        python program.
    """
    import zlib, base64
    header = ""
    # Keep an existing shebang at the top (encodings don't matter here)
    shebang_line = source.split('\n')[0]
    if analyze.shebang.match(shebang_line):
        if py3 and shebang_line.rstrip().endswith('python'):
            # Upgrade '#!/usr/bin/env python' to point at python3
            shebang_line = shebang_line.rstrip() + '3'
        header = shebang_line + '\n'
    payload = base64.b64encode(
        zlib.compress(source.encode('utf-8'))).decode('utf-8')
    return (
        header
        + 'import zlib, base64\n'
        + "exec(zlib.decompress(base64.b64decode('"
        + payload
        + "')))\n"
    )
+
def lzma_pack(source):
    """
    Returns 'source' as a lzma-compressed, self-extracting python script.

    .. note::

        This method uses up more space than the zip_pack method but it has the
        advantage in that the resulting .py file can still be imported into a
        python program.
    """
    import lzma, base64
    header = ""
    # Keep an existing shebang at the top (encodings don't matter here)
    shebang_line = source.split('\n')[0]
    if analyze.shebang.match(shebang_line):
        if py3 and shebang_line.rstrip().endswith('python'):
            # Upgrade '#!/usr/bin/env python' to point at python3
            shebang_line = shebang_line.rstrip() + '3'
        header = shebang_line + '\n'
    payload = base64.b64encode(
        lzma.compress(source.encode('utf-8'))).decode('utf-8')
    return (
        header
        + 'import lzma, base64\n'
        + "exec(lzma.decompress(base64.b64decode('"
        + payload
        + "')))\n"
    )
+
def prepend(line, path):
    """
    Prepends *line* to the _beginning_ of the file at the given *path*.

    If *line* doesn't end in a newline one will be appended to the end of it.

    :param line: str or bytes to place at the top of the file.
    :param path: Path of the file to modify in place.
    """
    if isinstance(line, str):
        line = line.encode('utf-8')
    if not line.endswith(b'\n'):
        line += b'\n'
    # BUG FIX: the original opened a NamedTemporaryFile, closed it (which
    # deletes it) and then re-opened the *name* -- a race condition, since
    # another process can claim that path in between.  mkstemp() creates
    # the file atomically and hands us the open descriptor.
    fd, temp_name = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'wb') as temp:
            temp.write(line)
            with open(path, 'rb') as original:
                shutil.copyfileobj(original, temp)
        # Now replace the original with the modified version
        shutil.move(temp_name, path)
    except Exception:
        # Don't leave the temp file behind on failure
        if os.path.exists(temp_name):
            os.remove(temp_name)
        raise
+
def zip_pack(filepath, options):
    """
    Creates a zip archive containing the script at *filepath* along with all
    imported modules that are local to *filepath* as a self-extracting python
    script.  A shebang will be appended to the beginning of the resulting
    zip archive which will allow it to be executed directly like a regular
    script.

    If being run inside Python 3 and the `lzma` module is available the
    resulting 'pyz' file will use ZIP_LZMA compression to maximize compression.

    *options* is expected to be the same options parsed from pyminifier.py
    on the command line.

    .. note::

        * The file resulting from this method cannot be imported as a module into another python program (command line execution only).
        * Any required local (implied path) modules will be automatically included (well, it does its best).
        * The result will be saved as a .pyz file (which is an extension I invented for this format).
    """
    import zipfile
    # Hopefully some day we'll be able to use ZIP_LZMA too as the compression
    # format to save even more space...
    compression_format = zipfile.ZIP_DEFLATED
    cumulative_size = 0 # For tracking size reduction stats
    # Record the filesize for later comparison
    cumulative_size += os.path.getsize(filepath)
    dest = options.pyz
    z = zipfile.ZipFile(dest, "w", compression_format)
    # Take care of minifying our primary script first:
    source = open(filepath).read()
    primary_tokens = token_utils.listified_tokenizer(source)
    # Preserve shebangs (don't care about encodings for this)
    shebang = analyze.get_shebang(primary_tokens)
    if not shebang:
        # We *must* have a shebang for this to work so make a conservative default:
        shebang = "#!/usr/bin/env python"
    if py3:
        if shebang.rstrip().endswith('python'): # Make it python3 (to be safe)
            shebang = shebang.rstrip()
            shebang += '3\n' #!/usr/bin/env python3
    if not options.nominify: # Minify as long as we don't have this option set
        source = minification.minify(primary_tokens, options)
    # Write out to a temporary file to add to our zip
    temp = tempfile.NamedTemporaryFile(mode='w')
    temp.write(source)
    temp.flush()
    # Need the path where the script lives for the next steps:
    path = os.path.split(filepath)[0]
    if not path:
        path = os.getcwd()
    main_py = path + '/__main__.py'
    if os.path.exists(main_py):
        # There's an existing __main__.py, use it
        z.write(main_py, '__main__.py')
        z.write(temp.name, os.path.split(filepath)[1])
    else:
        # No __main__.py so we rename our main script to be the __main__.py
        # This is so it will still execute as a zip
        # NOTE(review): this writes the *original* (unminified) file --
        # confirm whether temp.name (the minified copy) was intended here.
        z.write(filepath, '__main__.py')
    temp.close()
    # Now write any required modules into the zip as well
    local_modules = analyze.enumerate_local_modules(primary_tokens, path)
    name_generator = None # So we can tell if we need to obfuscate
    if options.obfuscate or options.obf_classes \
        or options.obf_functions or options.obf_variables \
        or options.obf_builtins or options.obf_import_methods:
        # Put together the generator that will be used for all obfuscation
        # functions:
        identifier_length = int(options.replacement_length)
        if options.use_nonlatin:
            if sys.version_info[0] == 3:
                name_generator = obfuscate.obfuscation_machine(
                    use_unicode=True, identifier_length=identifier_length
                )
            else:
                print(
                    "ERROR: You can't use nonlatin characters without Python 3")
                sys.exit(2)
        else:
            name_generator = obfuscate.obfuscation_machine(
                identifier_length=identifier_length)
        table =[{}]
    included_modules = []
    for module in local_modules:
        module = module.replace('.', '/')
        module = "%s.py" % module
        # Add the filesize to our total
        cumulative_size += os.path.getsize(module)
        # Also record that we've added it to the archive
        included_modules.append(module)
        # Minify these files too
        source = open(os.path.join(path, module)).read()
        tokens = token_utils.listified_tokenizer(source)
        maybe_more_modules = analyze.enumerate_local_modules(tokens, path)
        for mod in maybe_more_modules:
            if mod not in local_modules:
                local_modules.append(mod) # Extend the current loop, love it =)
        if not options.nominify:
            # Perform minification (this also handles obfuscation)
            source = minification.minify(tokens, options)
        # Have to re-tokenize for obfuscation (it's quick):
        tokens = token_utils.listified_tokenizer(source)
        # Perform obfuscation if any of the related options were set
        if name_generator:
            obfuscate.obfuscate(
                module,
                tokens,
                options,
                name_generator=name_generator,
                table=table
            )
        # Convert back to text
        result = token_utils.untokenize(tokens)
        result += (
                "# Created by pyminifier "
                "(https://github.com/liftoff/pyminifier)\n")
        # Write out to a temporary file to add to our zip
        # NOTE(review): 'source' is written below while 'result' (with the
        # attribution line appended) is discarded -- confirm whether
        # temp.write(result) was intended.
        temp = tempfile.NamedTemporaryFile(mode='w')
        temp.write(source)
        temp.flush()
        z.write(temp.name, module)
        temp.close()
    z.close()
    # Finish up by writing the shebang to the beginning of the zip
    prepend(shebang, dest)
    os.chmod(dest, 0o755) # Make it executable (since we added the shebang)
    pyz_filesize = os.path.getsize(dest)
    percent_saved = round(float(pyz_filesize) / float(cumulative_size) * 100, 2)
    print('%s saved as compressed executable zip: %s' % (filepath, dest))
    print('The following modules were automatically included (as automagic '
          'dependencies):\n')
    for module in included_modules:
        print('\t%s' % module)
    print('\nOverall size reduction: %s%% of original size' % percent_saved)
diff --git a/src/pyminifier_bundled/important_information_about_license.py b/src/pyminifier_bundled/important_information_about_license.py
new file mode 100644
index 0000000000000000000000000000000000000000..73afdbbc23ce567b3e2c0c072491f0978529eb69
--- /dev/null
+++ b/src/pyminifier_bundled/important_information_about_license.py
@@ -0,0 +1,14 @@
+"""
+Tue Herlau, 2022: I am bundling a version of the pyminifier library since the version maintained by the original author has
+not been updated for a while and will no longer install via pypi on the python versions I want to target.
+
+All code in this folder is either a copy or very small modification of code with this copyright statement:
+
+#
+#       Copyright 2013 Liftoff Software Corporation
+#
+# For license information see LICENSE.txt
+The license file can be found in the original repository.
+
+All rights belong to the original author.
+"""
\ No newline at end of file
diff --git a/src/pyminifier_bundled/minification.py b/src/pyminifier_bundled/minification.py
new file mode 100644
index 0000000000000000000000000000000000000000..54e4131018d25bd6ea2d58ef42d4db2ae53f9fee
--- /dev/null
+++ b/src/pyminifier_bundled/minification.py
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+
+__doc__ = """\
+Module for minification functions.
+"""
+
+# Import built-in modules
+import re, tokenize, keyword
+import io
+
+# Import our own modules
+from . import analyze, token_utils
+
# Compile our regular expressions for speed
# (raw strings throughout -- the originals for multiline_indicator and
# left_of_equals were plain strings, and '\s' inside a plain string is an
# invalid escape sequence that warns on modern Python)
multiline_quoted_string = re.compile(r'(\'\'\'|\"\"\")')
not_quoted_string = re.compile(r'(\".*\'\'\'.*\"|\'.*\"\"\".*\')')
trailing_newlines = re.compile(r'\n\n')
# Matches a line-continuation backslash, optionally followed by a comment:
multiline_indicator = re.compile(r'\\(\s*#.*)?\n')
left_of_equals = re.compile(r'^.*?=')
# The above also removes trailing comments: "test = 'blah \ # comment here"

# These aren't used but they're a pretty good reference:
double_quoted_string = re.compile(r'((?<!\\)".*?(?<!\\)")')
single_quoted_string = re.compile(r"((?<!\\)'.*?(?<!\\)')")
single_line_single_quoted_string = re.compile(r"((?<!\\)'''.*?(?<!\\)''')")
single_line_double_quoted_string = re.compile(r'((?<!\\)""".*?(?<!\\)""")')
+
def remove_comments(tokens):
    """
    Removes comments from *tokens* which is expected to be a list equivalent of
    tokenize.generate_tokens() (so we can update in-place).

    .. note::

        * If the comment makes up the whole line, the newline will also be removed (so you don't end up with lots of blank lines).
        * Preserves shebangs and encoding strings.
    """
    saved_shebang = ""
    saved_encoding = ""
    # Shebang/encoding lines always show up within the first four tokens:
    for token in tokens[:4]:
        source_line = token[4]
        if analyze.shebang.match(source_line):
            # e.g. '#!/usr/bin/env python' -- must be the first line
            saved_shebang = source_line
        elif analyze.encoding.match(source_line):
            # e.g. '# -*- coding: utf-8 -*-' -- first or second line
            saved_encoding = source_line
    # Blank out every comment token (an empty string removes it when the
    # token list is later untokenized):
    # TODO: Figure out a way to also drop the trailing newline of
    # comment-only lines.
    for token in tokens:
        if token[0] == tokenize.COMMENT:
            token[1] = ''
    # Re-tokenize the preserved shebang/encoding and push them back on front:
    if saved_shebang:
        io_obj = io.StringIO(saved_shebang + saved_encoding)
        restored = [list(t) for t in tokenize.generate_tokens(io_obj.readline)]
        restored.pop()  # Drop the trailing ENDMARKER
        for item in reversed(restored):
            tokens.insert(0, item)
+
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    previous_type = None
    for position, token in enumerate(tokens):
        current_type = token[0]
        if current_type == tokenize.STRING:
            if previous_type == tokenize.INDENT:
                # A string directly after an INDENT is definitely a docstring
                tokens[position][1] = ''
                # Blank the leftover indentation and preceding newline too:
                tokens[position - 1][1] = ''
                tokens[position - 2][1] = ''
            elif previous_type == tokenize.NL:
                # A string between NL and NEWLINE is a whole-module docstring
                if tokens[position + 1][0] == tokenize.NEWLINE:
                    tokens[position][1] = ''
                    tokens[position + 1][1] = ''  # its trailing newline too
        previous_type = current_type
+
def remove_comments_and_docstrings(source):
    """
    Returns *source* minus comments and docstrings.

    .. note:: Uses Python's built-in tokenize module to great effect.

    Example::

        def noop(): # This is a comment
            '''
            Does nothing.
            '''
            pass # Don't do anything

    Will become::

        def noop():
            pass
    """
    reader = io.StringIO(source)
    result = ""
    previous_type = tokenize.INDENT
    previous_end_row = -1
    previous_end_col = 0
    for token in tokenize.generate_tokens(reader.readline):
        tok_type, tok_string = token[0], token[1]
        start_row, start_col = token[2]
        end_row, end_col = token[3]
        if start_row > previous_end_row:
            previous_end_col = 0  # New physical line; reset the column
        if start_col > previous_end_col:
            # Restore horizontal whitespace between tokens
            result += " " * (start_col - previous_end_col)
        if tok_type == tokenize.COMMENT:
            pass  # Comments are simply dropped
        elif tok_type == tokenize.STRING:
            # A STRING preceded by INDENT or NEWLINE is a docstring and gets
            # dropped.  (tokenize emits NEWLINE for statement-ending
            # newlines and NL for newlines inside parens/brackets/braces;
            # indentation inside an operator is also not labelled INDENT,
            # which is why the start_col check below keeps such strings.)
            if previous_type != tokenize.INDENT:
                if previous_type != tokenize.NEWLINE:
                    if start_col > 0:
                        # Unlabelled indentation: we're inside an operator,
                        # so this string is real data -- keep it
                        result += tok_string
        else:
            result += tok_string
        previous_type = tok_type
        previous_end_col = end_col
        previous_end_row = end_row
    return result
+
def reduce_operators(source):
    """
    Remove spaces between operators in *source* and returns the result.
    Example::

        def foo(foo, bar, blah):
            test = "This is a %s" % foo

    Will become::

        def foo(foo,bar,blah):
            test="This is a %s"%foo

    ..  note::

        Also removes trailing commas and joins disjointed strings like
        ``("foo" "bar")``.
    """
    io_obj = io.StringIO(source)
    prev_tok = None            # Previous token (never read before first OP/
                               # spacing check, which a column-0 first token
                               # guarantees -- see NOTE below)
    out_tokens = []            # NOTE(review): unused leftover; kept as-is
    out = ""
    last_lineno = -1
    last_col = 0
    nl_types = (tokenize.NL, tokenize.NEWLINE)
    joining_strings = False    # True while merging adjacent string literals
    new_string = ""            # Accumulates the merged string contents
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if token_type != tokenize.OP:
            # NOTE(review): prev_tok may still be None here on the very
            # first token, but only when start_col > last_col, which cannot
            # happen at column 0 -- confirm for oddly-indented first lines.
            if start_col > last_col and token_type not in nl_types:
                if prev_tok[0] != tokenize.OP:
                    # Keep a single space between two non-operator tokens
                    out += (" " * (start_col - last_col))
            if token_type == tokenize.STRING:
                if prev_tok[0] == tokenize.STRING:
                    # Join the strings into one
                    string_type = token_string[0] # '' or ""
                    prev_string_type = prev_tok[1][0]
                    out = out.rstrip(" ") # Remove any spaces we inserted prev
                    if not joining_strings:
                        # Remove prev token and start the new combined string
                        out = out[:(len(out)-len(prev_tok[1]))]
                        prev_string = prev_tok[1].strip(prev_string_type)
                        new_string = (
                            prev_string + token_string.strip(string_type))
                        joining_strings = True
                    else:
                        new_string += token_string.strip(string_type)
        else:
            if token_string in ('}', ')', ']'):
                # Drop a trailing comma just before a closing bracket
                if prev_tok[1] == ',':
                    out = out.rstrip(',')
            if joining_strings:
                # NOTE: Using triple quotes so that this logic works with
                # mixed strings using both single quotes and double quotes.
                out += "'''" + new_string + "'''"
                joining_strings = False
            if token_string == '@': # Decorators need special handling
                if prev_tok[0] == tokenize.NEWLINE:
                    # Ensure it gets indented properly
                    out += (" " * (start_col - last_col))
        if not joining_strings:
            out += token_string
        last_col = end_col
        last_lineno = end_line
        prev_tok = tok
    return out
+
def join_multiline_pairs(source, pair="()"):
    """
    Find and remove newlines inside multiline matching pairs of characters
    in *source*.

    By default parentheses ``()`` are joined, but any two characters may be
    given via *pair*.

    .. note::

        Doesn't remove extraneous whitespace that ends up between the pair.
        Use `reduce_operators()` for that.

    Example::

        test = (
            "This is inside a multi-line pair of parentheses"
        )

    Will become::

        test = (            "This is inside a multi-line pair of parentheses"        )

    """
    opener, closer = pair[0], pair[1]
    depth = 0
    kept = []
    readline = io.StringIO(source).readline
    for tok in tokenize.generate_tokens(readline):
        tok_type = tok[0]
        tok_string = tok[1]
        if tok_type == tokenize.OP and tok_string in pair:
            # Track nesting depth so we know when we're back at top level
            if tok_string == opener:
                depth += 1
            elif tok_string == closer:
                depth -= 1
            kept.append(tok)
        elif tok_type in (tokenize.NL, tokenize.NEWLINE):
            # Drop newlines that occur inside an open pair
            if depth == 0:
                kept.append(tok)
        else:
            kept.append(tok)
    return token_utils.untokenize(kept)
+
def dedent(source, use_tabs=False):
    """
    Minimize indentation to save precious bytes.  Optionally, *use_tabs*
    may be specified if you want to use tabulators (\t) instead of spaces.

    Example::

        def foo(bar):
            test = "This is a test"

    Will become::

        def foo(bar):
         test = "This is a test"
    """
    indent_char = '\t' if use_tabs else ' '
    readline = io.StringIO(source).readline
    pieces = []
    prev_end_line = -1
    prev_end_col = 0
    prev_start_line = 0
    depth = 0  # Current block nesting level
    for tok in tokenize.generate_tokens(readline):
        tok_type, tok_string, (row, col), (end_row, end_col), _ = tok
        if row > prev_end_line:
            prev_end_col = 0
        # INDENT/DEDENT tokens only adjust the depth; their text is dropped
        if tok_type == tokenize.INDENT:
            depth += 1
            continue
        if tok_type == tokenize.DEDENT:
            depth -= 1
            continue
        if row > prev_start_line:
            # First token on a new line: re-emit minimal indentation,
            # except for continuation ',' or '.' which hug the line start
            if tok_string in (',', '.'):
                pieces.append(tok_string)
            else:
                pieces.append(indent_char * depth + tok_string)
        elif col > prev_end_col:
            # There was whitespace between tokens: collapse it to one char
            pieces.append(indent_char + tok_string)
        else:
            pieces.append(tok_string)
        prev_start_line = row
        prev_end_col = end_col
        prev_end_line = end_row
    return "".join(pieces)
+
+# TODO:  Rewrite this to use tokens
+def fix_empty_methods(source):
+    """
+    Appends 'pass' to empty methods/functions (i.e. where there was nothing but
+    a docstring before we removed it =).
+
+    Example::
+
+        # Note: This triple-single-quote inside a triple-double-quote is also a
+        # pyminifier self-test
+        def myfunc():
+            '''This is just a placeholder function.'''
+
+    Will become::
+
+        def myfunc(): pass
+    """
+    def_indentation_level = 0
+    output = ""
+    just_matched = False
+    previous_line = None
+    method = re.compile(r'^\s*def\s*.*\(.*\):.*$')
+    for line in source.split('\n'):
+        if len(line.strip()) > 0: # Don't look at blank lines
+            if just_matched == True:
+                this_indentation_level = len(line.rstrip()) - len(line.strip())
+                if def_indentation_level == this_indentation_level:
+                    # This method is empty, insert a 'pass' statement
+                    indent = " " * (def_indentation_level + 1)
+                    output += "%s\n%spass\n%s\n" % (previous_line, indent, line)
+                else:
+                    output += "%s\n%s\n" % (previous_line, line)
+                just_matched = False
+            elif method.match(line):
+                def_indentation_level = len(line) - len(line.strip())
+                just_matched = True
+                previous_line = line
+            else:
+                output += "%s\n" % line # Another self-test
+        else:
+            output += "\n"
+    return output
+
def remove_blank_lines(source):
    """
    Return *source* with all blank (whitespace-only) lines removed.

    Example:

    .. code-block:: python

        test = "foo"

        test2 = "bar"

    Will become:

    .. code-block:: python

        test = "foo"
        test2 = "bar"
    """
    buf = io.StringIO(source)
    # str.strip leaves an empty (falsy) string for whitespace-only lines
    return "".join(filter(str.strip, buf.readlines()))
+
def minify(tokens, options):
    """
    Perform minification on *tokens* according to the values in *options*
    and return the minified source as a string.
    """
    # In-place passes over the token list first
    remove_comments(tokens)
    remove_docstrings(tokens)
    result = token_utils.untokenize(tokens)
    # Then the textual passes over the untokenized source
    result = multiline_indicator.sub('', result)
    result = fix_empty_methods(result)
    for bracket_pair in ('()', '[]', '{}'):
        result = join_multiline_pairs(result, bracket_pair)
    result = remove_blank_lines(result)
    result = reduce_operators(result)
    return dedent(result, use_tabs=options.tabs)
diff --git a/src/pyminifier_bundled/obfuscate.py b/src/pyminifier_bundled/obfuscate.py
new file mode 100755
index 0000000000000000000000000000000000000000..a54455a3bacc6deec9a41e709566465e610a3ddb
--- /dev/null
+++ b/src/pyminifier_bundled/obfuscate.py
@@ -0,0 +1,772 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__doc__ = """\
A collection of functions for obfuscating code.
"""

import os, sys, tokenize, keyword, unicodedata # 'sys' was listed twice
from random import shuffle, choice
from itertools import permutations

# Import our own modules
from . import analyze
from . import token_utils

# sys.version_info is an instance of a tuple subclass on Python 3, so the
# original "not isinstance(sys.version_info, tuple)" guard never fired,
# unichr was left undefined, and the except clause below silently forced
# HIGHEST_UNICODE down to 0xFFFF even on wide Python 3 builds.
if sys.version_info[0] == 3:
    unichr = chr # So we can support both 2 and 3

try:
    unichr(0x10000) # Will throw a ValueError on narrow Python builds
    HIGHEST_UNICODE = 0x10FFFF # 1114111
except ValueError:
    HIGHEST_UNICODE = 0xFFFF # 65535

# Reserved words can be overridden by the script that imports this module
RESERVED_WORDS = keyword.kwlist + analyze.builtins
VAR_REPLACEMENTS = {} # So we can reference what's already been replaced
FUNC_REPLACEMENTS = {}
CLASS_REPLACEMENTS = {}
UNIQUE_REPLACEMENTS = {}
+
def obfuscation_machine(use_unicode=False, identifier_length=1):
    """
    A generator that returns short sequential combinations of lower and
    upper-case letters that will never repeat.

    If *use_unicode* is ``True``, use nonlatin cyrillic, arabic, and syriac
    letters instead of the usual ABCs.

    The *identifier_length* represents the length of the string to return using
    the aforementioned characters.
    """
    # This generates a list of the letters a-z:
    lowercase = list(map(chr, range(97, 123)))
    # Same thing but ALL CAPS.  The original used range(65, 90), which
    # silently dropped 'Z' since range() excludes its upper bound:
    uppercase = list(map(chr, range(65, 91)))
    if use_unicode:
        # Python 3 lets us have some *real* fun:
        allowed_categories = ('LC', 'Ll', 'Lu', 'Lo') # ('Lu' was duplicated)
        # All the fun characters start at 1580 (hehe):
        big_list = list(map(chr, range(1580, HIGHEST_UNICODE)))
        max_chars = 1000 # Ought to be enough for anybody :)
        combined = []
        rtl_categories = ('AL', 'R') # AL == Arabic, R == Any right-to-left
        last_orientation = 'L'       # L = Any left-to-right
        # Find a good mix of left-to-right and right-to-left characters
        while len(combined) < max_chars:
            char = choice(big_list)
            if unicodedata.category(char) in allowed_categories:
                orientation = unicodedata.bidirectional(char)
                # Only accept a character whose direction differs from the
                # previously accepted one, alternating LTR and RTL
                if last_orientation in rtl_categories:
                    if orientation not in rtl_categories:
                        combined.append(char)
                else:
                    if orientation in rtl_categories:
                        combined.append(char)
                last_orientation = orientation
    else:
        combined = lowercase + uppercase
    shuffle(combined) # Randomize it all to keep things interesting
    while True:
        for perm in permutations(combined, identifier_length):
            perm = "".join(perm)
            if perm not in RESERVED_WORDS: # Can't replace reserved words
                yield perm
        identifier_length += 1
+
def apply_obfuscation(source):
    """
    Return *source* with its variables, functions, and classes obfuscated.

    Also refreshes the module-level ``keyword_args`` and
    ``imported_modules`` globals used by the ``obfuscatable_*`` helpers.
    """
    global keyword_args
    global imported_modules
    tokens = token_utils.listified_tokenizer(source)
    keyword_args = analyze.enumerate_keyword_args(tokens)
    imported_modules = analyze.enumerate_imports(tokens)
    # The original referenced an undefined global ``name_generator``, which
    # made this function raise NameError; create a generator here instead.
    name_generator = obfuscation_machine()
    variables = find_obfuscatables(tokens, obfuscatable_variable)
    classes = find_obfuscatables(tokens, obfuscatable_class)
    functions = find_obfuscatables(tokens, obfuscatable_function)
    # replace_obfuscatables() takes the module name as its first argument
    # (it is only consulted when a cross-module *table* is passed, which we
    # don't do here); the original calls omitted it, shifting every other
    # argument by one position.
    module = '__main__'
    for variable in variables:
        replace_obfuscatables(
            module, tokens, obfuscate_variable, variable, name_generator)
    for function in functions:
        replace_obfuscatables(
            module, tokens, obfuscate_function, function, name_generator)
    for _class in classes:
        replace_obfuscatables(
            module, tokens, obfuscate_class, _class, name_generator)
    return token_utils.untokenize(tokens)
+
def find_obfuscatables(tokens, obfunc, ignore_length=False):
    """
    Iterates over *tokens*, which must be an equivalent output to what
    tokenize.generate_tokens() produces, calling *obfunc* on each with the
    following parameters:

        - **tokens:**     The current list of tokens.
        - **index:**      The current position in the list.

    *obfunc* is expected to return the token string if that token can be safely
    obfuscated **or** one of the following optional values which will instruct
    find_obfuscatables() how to proceed:

        - **'__skipline__'**   Keep skipping tokens until a newline is reached.
        - **'__skipnext__'**   Skip the next token in the sequence.

    If *ignore_length* is ``True`` then single-character obfuscatables will
    be obfuscated anyway (even though it wouldn't save any space).

    Returns a list of unique obfuscatable token strings.

    .. note::

        As a side effect this refreshes the module-level ``keyword_args``
        and ``imported_modules`` globals consulted by the
        ``obfuscatable_*`` helper functions.
    """
    global keyword_args
    keyword_args = analyze.enumerate_keyword_args(tokens)
    global imported_modules
    imported_modules = analyze.enumerate_imports(tokens)
    #print("imported_modules: %s" % imported_modules)
    skip_line = False
    skip_next = False
    obfuscatables = []
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.NEWLINE:
            # A new logical line cancels any pending '__skipline__'
            skip_line = False
        if skip_line:
            continue
        result = obfunc(tokens, index, ignore_length=ignore_length)
        if result:
            if skip_next:
                # The previous token asked for this one to be ignored
                skip_next = False
            elif result == '__skipline__':
                skip_line = True
            elif result == '__skipnext__':
                skip_next = True
            elif result in obfuscatables:
                pass # Already recorded; keep the result list unique
            else:
                obfuscatables.append(result)
        else: # If result is empty we need to reset skip_next so we don't
            skip_next = False # accidentally skip the next identifier
    return obfuscatables
+
+# Note: I'm using 'tok' instead of 'token' since 'token' is a built-in module
def obfuscatable_variable(tokens, index, ignore_length=False):
    """
    Return the token string at *index* in *tokens* if it names a variable
    that can be safely obfuscated, otherwise ``None``.

    Returns ``'__skipline__'`` when the rest of the tokens on this line
    should be skipped and ``'__skipnext__'`` when only the next token
    should be skipped.

    If *ignore_length* is ``True``, even variables that are already a single
    character will be reported (typically only used with the ``--nonlatin``
    option).
    """
    fake_newline = (54, '\n', (1, 1), (1, 2), '#\n')
    current = tokens[index]
    tok_type = current[0]
    tok_string = current[1]
    previous = tokens[index - 1] if index > 0 else fake_newline
    try:
        following = tokens[index + 1]
    except IndexError:
        following = fake_newline
    prev_type = previous[0]
    prev_string = previous[1]
    next_string = following[1]
    if tok_string == "=":
        return '__skipline__'
    if tok_type != tokenize.NAME:
        return None # Only identifiers can be variables
    if tok_string.startswith('__'):
        return None # Leave dunder names alone
    if next_string == "." and tok_string in imported_modules:
        return None # Don't rename modules used in attribute access
    if prev_string == 'import':
        return '__skipline__'
    if prev_string == ".":
        return '__skipnext__'
    if prev_string == "for" and len(tok_string) > 2:
        return tok_string # Loop variable worth shortening
    if tok_string == "for":
        return None
    if tok_string in keyword_args:
        return None # Keyword-argument names must keep their spelling
    if tok_string in ("def", "class", 'if', 'elif', 'import'):
        return '__skipline__'
    if prev_type != tokenize.INDENT and next_string != '=':
        return '__skipline__'
    if not ignore_length and len(tok_string) < 3:
        return None # Too short to be worth obfuscating
    if tok_string in RESERVED_WORDS:
        return None
    return tok_string
+
def obfuscatable_class(tokens, index, **kwargs):
    """
    Return the token string at *index* in *tokens* if it is a class name
    that can be safely obfuscated, otherwise ``None``.
    """
    current = tokens[index]
    if current[0] != tokenize.NAME:
        return None # Only identifiers can be class names
    name = current[1]
    if name.startswith('__'): # Don't mess with specials
        return None
    # Pretend the previous token is a newline when we're at the start
    previous = tokens[index - 1] if index > 0 else (54, '\n', (1, 1), (1, 2), '#\n')
    if previous[1] == "class":
        return name
+
def obfuscatable_function(tokens, index, **kwargs):
    """
    Return the token string at *index* in *tokens* if it is a function or
    method name that can be safely obfuscated, otherwise ``None``.
    """
    current = tokens[index]
    if current[0] != tokenize.NAME:
        return None # Only identifiers can be function names
    name = current[1]
    if name.startswith('__'): # Don't mess with specials
        return None
    # Pretend the previous token is a newline when we're at the start
    previous = tokens[index - 1] if index > 0 else (54, '\n', (1, 1), (1, 2), '#\n')
    if previous[1] == "def":
        return name
+
def replace_obfuscatables(module, tokens, obfunc, replace, name_generator, table=None):
    """
    Iterates over *tokens*, which must be an equivalent output to what
    tokenize.generate_tokens() produces, replacing the given identifier name
    (*replace*) by calling *obfunc* on each token with the following parameters:

        - **module:**       The name of the script we're currently obfuscating.
        - **tokens:**       The current list of all tokens.
        - **index:**        The current position.
        - **replace:**      The token string that we're replacing.
        - **replacement:**  A randomly generated, unique value that will be used to replace, *replace*.
        - **right_of_equal:**   A True or False value representing whether or not the token is to the right of an equal sign.  **Note:** This gets reset to False if a comma or open paren are encountered.
        - **inside_parens:**    An integer that is incremented whenever an open paren is encountered and decremented when a close paren is encountered.
        - **inside_function:**  If not False, the name of the function definition we're inside of (used in conjunction with *keyword_args* to determine if a safe replacement can be made).

    *obfunc* is expected to return the token string if that token can be safely
    obfuscated **or** one of the following optional values which will instruct
    replace_obfuscatables() how to proceed:

        - **'__open_paren__'**        Increment the inside_parens value
        - **'__close_paren__'**       Decrement the inside_parens value
        - **'__comma__'**             Reset the right_of_equal value to False
        - **'__right_of_equal__'**    Sets the right_of_equal value to True

    **Note:** The right_of_equal and the inside_parens values are reset whenever a NEWLINE is encountered.

    When obfuscating a list of files, *table* is used to keep track of which
    obfuscatable identifiers are which inside each resulting file.  It must be
    an empty dictionary that will be populated like so::

        {orig_name: obfuscated_name}

    This *table* of "what is what" will be used to ensure that references from
    one script/module that call another are kept in sync when they are replaced
    with obfuscated values.

    Updates *tokens* in place; returns ``None``.
    """
    # Pretend the first line is '#\n':
    skip_line = False
    skip_next = False
    right_of_equal = False
    inside_parens = 0
    inside_function = False
    indent = 0
    function_indent = 0
    # A single replacement name is used for every occurrence of *replace*
    replacement = next(name_generator)
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            # A logical newline resets all line-scoped state
            skip_line = False
            right_of_equal = False
            inside_parens = 0
        elif token_type == tokenize.INDENT:
            indent += 1
        elif token_type == tokenize.DEDENT:
            indent -= 1
            # Dedenting back to the def's own level means we left the function
            if inside_function and function_indent == indent:
                function_indent = 0
                inside_function = False
        if token_string == "def":
            # Remember which function definition we're inside of
            function_indent = indent
            function_name = tokens[index+1][1]
            inside_function = function_name
        result = obfunc(
            tokens,
            index,
            replace,
            replacement,
            right_of_equal,
            inside_parens,
            inside_function
        )
        if result:
            if skip_next:
                skip_next = False
            elif skip_line:
                pass
            elif result == '__skipline__':
                skip_line = True
            elif result == '__skipnext__':
                skip_next = True
            elif result == '__open_paren__':
                right_of_equal = False
                inside_parens += 1
            elif result == '__close_paren__':
                inside_parens -= 1
            elif result == '__comma__':
                right_of_equal = False
            elif result == '__right_of_equal__':
                # We only care if we're right of the equal sign outside of
                # parens (which indicates arguments)
                if not inside_parens:
                    right_of_equal = True
            else:
                if table: # Save it for later use in other files
                    # table[0] maps "module.identifier" -> obfuscated name
                    combined_name = "%s.%s" % (module, token_string)
                    try: # Attempt to use an existing value
                        tokens[index][1] = table[0][combined_name]
                    except KeyError: # Doesn't exist, add it to table
                        table[0].update({combined_name: result})
                        tokens[index][1] = result
                else:
                    tokens[index][1] = result
+
def obfuscate_variable(
        tokens,
        index,
        replace,
        replacement,
        right_of_equal,
        inside_parens,
        inside_function):
    """
    If the token string inside *tokens[index]* matches *replace*, return
    *replacement*. *right_of_equal*, and *inside_parens* are used to determine
    whether or not this token is safe to obfuscate.

    May also return one of the control strings '__skipline__',
    '__skipnext__', '__right_of_equal__', '__open_paren__',
    '__close_paren__' or '__comma__' which tell replace_obfuscatables()
    how to update its state.
    """
    def return_replacement(replacement):
        # Record the substitution so obfuscate_function() can later tell
        # whether an attribute's parent was a renamed variable
        VAR_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if index > 0:
        prev_tok = tokens[index-1]
    else: # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_string = prev_tok[1]
    try:
        next_tok = tokens[index+1]
    except IndexError: # Pretend it's a newline
        next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    if token_string == "import":
        return '__skipline__'
    if next_tok[1] == '.':
        # Don't rename module names used in attribute access
        if token_string in imported_modules:
            return None
    if token_string == "=":
        return '__right_of_equal__'
    if token_string == "(":
        return '__open_paren__'
    if token_string == ")":
        return '__close_paren__'
    if token_string == ",":
        return '__comma__'
    if token_type != tokenize.NAME:
        return None # Skip this token
    if token_string.startswith('__'):
        return None
    if prev_tok_string == 'def':
        return '__skipnext__' # Don't want to touch functions
    if token_string == replace and prev_tok_string != '.':
        if inside_function:
            # Keyword arguments of the enclosing function must keep their
            # names.  The next_tok checks below presumably guard against
            # renaming 'name' in a 'name=value' keyword-argument call --
            # NOTE(review): confirm against replace_obfuscatables() usage.
            if token_string not in keyword_args[inside_function]:
                if not right_of_equal:
                    if not inside_parens:
                        return return_replacement(replacement)
                    else:
                        if next_tok[1] != '=':
                            return return_replacement(replacement)
                elif not inside_parens:
                    return return_replacement(replacement)
                else:
                    if next_tok[1] != '=':
                        return return_replacement(replacement)
        elif not right_of_equal:
            if not inside_parens:
                return return_replacement(replacement)
            else:
                if next_tok[1] != '=':
                    return return_replacement(replacement)
        elif right_of_equal and not inside_parens:
            return return_replacement(replacement)
+
def obfuscate_function(tokens, index, replace, replacement, *args):
    """
    Return *replacement* if the function name at *tokens[index]* matches
    *replace*; the substitution is recorded in ``FUNC_REPLACEMENTS``.
    """
    def remember(new_name):
        FUNC_REPLACEMENTS[new_name] = replace
        return new_name
    current = tokens[index]
    if current[0] != tokenize.NAME:
        return None # Only identifiers are candidates
    name = current[1]
    if name.startswith('__'): # Never touch dunder names
        return None
    if name != replace:
        return None
    prev_string = tokens[index-1][1]
    if prev_string != '.':
        return remember(replacement)
    # Attribute access: only obfuscate when the parent object is one that
    # we have already renamed (a class or an instance variable)
    parent_name = tokens[index-2][1]
    if parent_name in CLASS_REPLACEMENTS:
        # This should work for @classmethod methods
        return remember(replacement)
    elif parent_name in VAR_REPLACEMENTS:
        # This covers regular ol' instance methods
        return remember(replacement)
+
def obfuscate_class(tokens, index, replace, replacement, *args):
    """
    Return *replacement* if the class name at *tokens[index]* matches
    *replace*; the substitution is recorded in ``CLASS_REPLACEMENTS``.
    """
    def remember(new_name):
        CLASS_REPLACEMENTS[new_name] = replace
        return new_name
    current = tokens[index]
    if current[0] != tokenize.NAME:
        return None # Only identifiers are candidates
    name = current[1]
    if name.startswith('__'): # Never touch dunder names
        return None
    prev_string = tokens[index-1][1]
    # Skip attribute access such as ``other.ClassName``
    if prev_string != '.' and name == replace:
        return remember(replacement)
+
def obfuscate_unique(tokens, index, replace, replacement, *args):
    """
    Return *replacement* if the token at *tokens[index]* matches *replace*;
    the substitution is recorded in ``UNIQUE_REPLACEMENTS``.

    .. note::

        This function is only for replacing absolutely unique occurrences
        of *replace* (where we don't have to worry about their position).
    """
    def remember(new_name):
        UNIQUE_REPLACEMENTS[new_name] = replace
        return new_name
    current = tokens[index]
    if current[0] == tokenize.NAME and current[1] == replace:
        return remember(replacement)
    return None
+
def remap_name(name_generator, names, table=None):
    """
    Build a series of variable assignments in the form of::

        <obfuscated name>=<some identifier>

    for each item in *names*, drawing replacement names from
    *name_generator*.

    If *table* is provided, replacements will be looked up there before
    generating a new unique name.
    """
    lines = []
    for original in names:
        if table and original in table[0]:
            new_name = table[0][original]
        else:
            new_name = next(name_generator)
        lines.append("%s=%s\n" % (new_name, original))
    return "".join(lines)
+
def insert_in_next_line(tokens, index, string):
    """
    Tokenize *string* and insert the resulting tokens into *tokens*
    immediately after the first NL/NEWLINE token found at or after
    *tokens[index]*.  Updates *tokens* in place.
    """
    new_tokens = token_utils.listified_tokenizer(string)
    for offset, tok in enumerate(tokens[index:]):
        if tok[0] in (tokenize.NL, tokenize.NEWLINE):
            insert_at = index + offset + 1
            for shift, item in enumerate(new_tokens):
                tokens.insert(insert_at + shift, item)
            break
+
def obfuscate_builtins(module, tokens, name_generator, table=None):
    """
    Inserts an assignment, '<obfuscated identifier> = <builtin function>'  at
    the beginning of *tokens* (after the shebang and encoding if present) for
    every Python built-in function that is used inside *tokens*.  Also, replaces
    all of said built-in functions in *tokens* with each respective obfuscated
    identifier.

    Obfuscated identifier names are pulled out of name_generator via next().

    If *table* is provided, replacements will be looked up there before
    generating a new unique name.
    """
    used_builtins = analyze.enumerate_builtins(tokens)
    # One "replacement=builtin" line per builtin actually used
    obfuscated_assignments = remap_name(name_generator, used_builtins, table)
    replacements = []
    for assignment in obfuscated_assignments.split('\n'):
        replacements.append(assignment.split('=')[0])
    replacement_dict = dict(zip(used_builtins, replacements))
    if table:
        table[0].update(replacement_dict)
    # Re-use the same replacement names, in order, for the in-place
    # token substitution below
    iter_replacements = iter(replacements)
    for builtin in used_builtins:
        replace_obfuscatables(
            module, tokens, obfuscate_unique, builtin, iter_replacements)
    # Check for shebangs and encodings before we do anything else
    skip_tokens = 0
    matched_shebang = False
    matched_encoding = False
    for tok in tokens[0:4]: # Will always be in the first four tokens
        line = tok[4]
        if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
            if not matched_shebang:
                matched_shebang = True
                skip_tokens += 1
        elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
            if not matched_encoding:
                matched_encoding = True
                skip_tokens += 1
    insert_in_next_line(tokens, skip_tokens, obfuscated_assignments)
+
def obfuscate_global_import_methods(module, tokens, name_generator, table=None):
    """
    Replaces the used methods of globally-imported modules with obfuscated
    equivalents.  Updates *tokens* in-place.

    *module* must be the name of the module we're currently obfuscating
    (kept for interface consistency with the other obfuscation functions).

    *tokens* is a list of token lists (as produced by listified_tokenizer()).

    *name_generator* is an iterator yielding unique replacement identifiers.

    If *table* is provided (a list containing a single dict), replacements for
    import methods will be attempted to be looked up there before generating a
    new unique name, and new replacements will be added to it.
    """
    local_imports = analyze.enumerate_local_modules(tokens, os.getcwd())
    module_methods = analyze.enumerate_import_methods(tokens)
    # Make a 1-to-1 mapping dict of module_method<->replacement:
    if table:
        replacement_dict = {}
        for module_method in module_methods:
            if module_method in table[0]:
                # Reuse the existing replacement so obfuscation stays
                # consistent across modules sharing the same table.
                replacement_dict[module_method] = table[0][module_method]
            else:
                replacement_dict[module_method] = next(name_generator)
        # Update the global lookup table with the new entries:
        table[0].update(replacement_dict)
    else:
        replacement_dict = {
            method: next(name_generator) for method in module_methods}
    import_line = False
    # Replace "module.method" token triplets with our obfuscated names:
    for module_method in module_methods:
        mod_name = module_method.split('.')[0]
        method_name = module_method.split('.')[1]
        for index, tok in enumerate(tokens):
            if tok[0] != tokenize.NAME:
                continue # Speedup
            if index + 2 >= len(tokens):
                # A matching NAME this close to the end of the stream cannot
                # be a "module.method" reference.  (Previously a stray
                # "tokens[index+1][1]" expression here could raise IndexError.)
                continue
            if tok[1] == mod_name:
                if tokens[index+1][1] == '.':
                    if tokens[index+2][1] == method_name:
                        # Collapse the three tokens into the replacement name.
                        # With a *table*, table[0][module_method] equals
                        # replacement_dict[module_method] (we merged above),
                        # so a single assignment covers both cases.
                        tokens[index][1] = replacement_dict[module_method]
                        tokens[index+1][1] = ""
                        tokens[index+2][1] = ""
    # Insert our map of replacement=what after each respective module import
    for module_method, replacement in replacement_dict.items():
        indents = []
        index = 0
        for tok in tokens[:]: # Copy: insert_in_next_line() mutates *tokens*
            token_type = tok[0]
            token_string = tok[1]
            if token_type == tokenize.NEWLINE:
                import_line = False
            elif token_type == tokenize.INDENT:
                indents.append(tok)
            elif token_type == tokenize.DEDENT:
                indents.pop()
            elif token_string == "import":
                import_line = True
            elif import_line:
                if token_string == module_method.split('.')[0]:
                    # Insert the obfuscation assignment after the import
                    imported_module = ".".join(module_method.split('.')[:-1])
                    if table and imported_module in local_imports:
                        line = "%s=%s.%s\n" % ( # This ends up being 6 tokens
                            replacement_dict[module_method],
                            imported_module,
                            replacement_dict[module_method]
                        )
                    else:
                        line = "%s=%s\n" % ( # This ends up being 6 tokens
                            replacement_dict[module_method], module_method)
                    for indent in indents: # Fix indentation
                        line = "%s%s" % (indent[1], line)
                        index += 1
                    insert_in_next_line(tokens, index, line)
                    index += 6 # To make up for the six tokens we inserted
            index += 1
+
def obfuscate(module, tokens, options, name_generator=None, table=None):
    """
    Obfuscates *tokens* in-place.  *options* is expected to be the options
    variable passed through from pyminifier.py.

    *module* must be the name of the module we're currently obfuscating

    If *name_generator* is provided it will be used to obtain replacement
    values for identifiers.  If not, a new obfuscation_machine() instance
    will be created.

    If *table* is given (should be a list containing a single dictionary), it
    will be used to perform lookups of replacements and any new replacements
    will be added to it.
    """
    # Need a universal instance of our generator to avoid duplicates
    identifier_length = int(options.replacement_length)
    ignore_length = False
    if name_generator is None:
        if options.use_nonlatin:
            ignore_length = True
            if sys.version_info[0] == 3:
                name_generator = obfuscation_machine(
                    use_unicode=True, identifier_length=identifier_length)
            else:
                print(
                    "ERROR: You can't use nonlatin characters without Python 3")
        else:
            name_generator = obfuscation_machine(
                identifier_length=identifier_length)

    def _replace_all(items, obfuscator):
        # One replace_obfuscatables() pass per discovered identifier.
        for item in items:
            replace_obfuscatables(
                module, tokens, obfuscator, item, name_generator, table)

    if options.obfuscate:
        # Full obfuscation: everything obfuscatable gets replaced.
        variables = find_obfuscatables(
            tokens, obfuscatable_variable, ignore_length=ignore_length)
        classes = find_obfuscatables(tokens, obfuscatable_class)
        functions = find_obfuscatables(tokens, obfuscatable_function)
        _replace_all(variables, obfuscate_variable)
        _replace_all(functions, obfuscate_function)
        _replace_all(classes, obfuscate_class)
        obfuscate_global_import_methods(module, tokens, name_generator, table)
        obfuscate_builtins(module, tokens, name_generator, table)
    else:
        # Selective obfuscation: only the categories the user asked for.
        if options.obf_classes:
            _replace_all(
                find_obfuscatables(tokens, obfuscatable_class),
                obfuscate_class)
        if options.obf_functions:
            _replace_all(
                find_obfuscatables(tokens, obfuscatable_function),
                obfuscate_function)
        if options.obf_variables:
            _replace_all(
                find_obfuscatables(tokens, obfuscatable_variable),
                obfuscate_variable)
        if options.obf_import_methods:
            obfuscate_global_import_methods(
                module, tokens, name_generator, table)
        if options.obf_builtins:
            obfuscate_builtins(module, tokens, name_generator, table)
+
if __name__ == "__main__":
    # Script entry point: obfuscate the file named on the command line and
    # print the result.  (The original "global name_generator" was a no-op at
    # module scope and has been dropped.)
    try:
        # "with" closes the file promptly instead of leaking the handle
        with open(sys.argv[1]) as f:
            source = f.read()
    except (IndexError, OSError):
        # IndexError: no filename argument; OSError: file unreadable.
        # (A bare "except:" here used to swallow everything, including
        # KeyboardInterrupt, and misreport it as a usage error.)
        print("Usage: %s <filename.py>" % sys.argv[0])
        sys.exit(1)
    if sys.version_info[0] == 3:
        name_generator = obfuscation_machine(use_unicode=True)
    else:
        name_generator = obfuscation_machine(identifier_length=1)
    source = apply_obfuscation(source)
    print(source)
diff --git a/src/pyminifier_bundled/token_utils.py b/src/pyminifier_bundled/token_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7afa4cf10edb4070113efb49a3011857e9c3ae2
--- /dev/null
+++ b/src/pyminifier_bundled/token_utils.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+__doc__ = """\
+A couple of functions for dealing with tokens generated by the `tokenize`
+module.
+"""
+
+import tokenize
+import io
+
def untokenize(tokens):
    """
    Converts the output of tokenize.generate_tokens back into a human-readable
    string (that doesn't contain oddly-placed whitespace everywhere).

    .. note::

        Unlike :meth:`tokenize.untokenize`, this function requires the 3rd and
        4th items in each token tuple (though we can use lists *or* tuples).
    """
    pieces = []
    prev_row = -1
    prev_col = 0
    for tok in tokens:
        text = tok[1]
        row, col = tok[2]
        end_row, end_col = tok[3]
        # A token on a fresh line starts from column zero:
        if row > prev_row:
            prev_col = 0
        # Pad with spaces to restore the original column position
        # (newline tokens never need padding):
        if col > prev_col and text != '\n':
            pieces.append(" " * (col - prev_col))
        pieces.append(text)
        prev_row, prev_col = end_row, end_col
    return "".join(pieces)
+
def listified_tokenizer(source):
    """Tokenizes *source* and returns the tokens as a list of lists."""
    readline = io.StringIO(source).readline
    return list(map(list, tokenize.generate_tokens(readline)))
diff --git a/src/unitgrade_devel.egg-info/PKG-INFO b/src/unitgrade_devel.egg-info/PKG-INFO
index 3825a47257c27e369284a5234b345a6fd04d9ada..9c26b28cb1d7273fd63dc43d8a0cde6b4e932005 100644
--- a/src/unitgrade_devel.egg-info/PKG-INFO
+++ b/src/unitgrade_devel.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unitgrade-devel
-Version: 0.1.41
+Version: 0.1.42
 Summary: A set of tools to develop unitgrade tests and reports and later evaluate them
 Home-page: https://lab.compute.dtu.dk/tuhe/unitgrade_private
 Author: Tue Herlau
diff --git a/src/unitgrade_devel.egg-info/SOURCES.txt b/src/unitgrade_devel.egg-info/SOURCES.txt
index 014650545863f8d900fcb0232973ab06b7450224..7bf3602b040c1b8a9e691a1b3468a00d434c7f8d 100644
--- a/src/unitgrade_devel.egg-info/SOURCES.txt
+++ b/src/unitgrade_devel.egg-info/SOURCES.txt
@@ -3,6 +3,14 @@ MANIFEST.in
 README.md
 pyproject.toml
 setup.py
+src/pyminifier_bundled/__init__.py
+src/pyminifier_bundled/__main__.py
+src/pyminifier_bundled/analyze.py
+src/pyminifier_bundled/compression.py
+src/pyminifier_bundled/important_information_about_license.py
+src/pyminifier_bundled/minification.py
+src/pyminifier_bundled/obfuscate.py
+src/pyminifier_bundled/token_utils.py
 src/unitgrade_devel.egg-info/PKG-INFO
 src/unitgrade_devel.egg-info/SOURCES.txt
 src/unitgrade_devel.egg-info/dependency_links.txt
diff --git a/src/unitgrade_devel.egg-info/requires.txt b/src/unitgrade_devel.egg-info/requires.txt
index c27729819985e85184b07e311121ddc4c95a6f6a..be6980bb8a316cf5daf03583a15d73ef2c27c4b4 100644
--- a/src/unitgrade_devel.egg-info/requires.txt
+++ b/src/unitgrade_devel.egg-info/requires.txt
@@ -2,9 +2,7 @@ codesnipper
 colorama
 coverage
 mosspy
-mosspy
 numpy
 pyfiglet
 tabulate
 tqdm
-unitgrade
diff --git a/src/unitgrade_devel.egg-info/top_level.txt b/src/unitgrade_devel.egg-info/top_level.txt
index 4ee30e0b4748ead169757292713911d9c9fd7beb..afce43e42978ac41b718215f5bb9e84ca7f432ae 100644
--- a/src/unitgrade_devel.egg-info/top_level.txt
+++ b/src/unitgrade_devel.egg-info/top_level.txt
@@ -1 +1,2 @@
+pyminifier_bundled
 unitgrade_private
diff --git a/src/unitgrade_private/hidden_create_files.py b/src/unitgrade_private/hidden_create_files.py
index dde1be0d4924a44e16795451a21ba88b82a77cbb..2924e2239ad103101289b5220c5b70b014f22226 100644
--- a/src/unitgrade_private/hidden_create_files.py
+++ b/src/unitgrade_private/hidden_create_files.py
@@ -50,18 +50,27 @@ def setup_grade_file_report(ReportClass, execute=False, obfuscate=False, minify=
     # report.url = None # We set the URL to none to skip the consistency checks with the remote source.
     payload = report._setup_answers(with_coverage=with_coverage, verbose=verbose)
     payload['config'] = {}
-    artifacts = {}
-    artifacts['questions'] = {}
 
-    db = PupDB(report._artifact_file())
-    db.set('encoding_scheme', " from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;")
+    # Save metadata about the test for use with the dashboard (and nothing else). Do not use this data for any part of the evaluation, etc. -- only the dashboard!
+    # Don't save using diskcache, as we want to easily be able to remove the diskcache files without any issues.
+    # db = {}
+    # db = PupDB(report._artifact_file())
     from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict
-
+    artifacts = {}
+    artifacts['questions'] = {}
     root_dir, relative_path, modules = report._import_base_relative()
-    db.set('root_dir', root_dir)
-    db.set('relative_path', relative_path)
-    db.set('modules', modules)
-    db.set('token_stub', os.path.dirname(relative_path) +"/" + ReportClass.__name__ + "_handin")
+    db = {'encoding_scheme': "from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;",
+          'root_dir': root_dir,
+          'relative_path': relative_path,
+         'modules': modules,
+         'token_stub': os.path.dirname(relative_path) + "/" + ReportClass.__name__ + "_handin",
+          }
+    # db.set('encoding_scheme',
+    #        "from unitgrade_private.hidden_gather_upload import dict2picklestring, picklestring2dict;")
+    # db.set('root_dir', root_dir)
+    # db.set('relative_path', relative_path)
+    # db.set('modules', modules)
+    # db.set('token_stub', os.path.dirname(relative_path) +"/" + ReportClass.__name__ + "_handin")
 
     # Set up the artifact file. Do this by looping over all tests in the report. Assumes that all are of the form UTestCase.
     from unitgrade.evaluate import SequentialTestLoader
@@ -69,6 +78,19 @@ def setup_grade_file_report(ReportClass, execute=False, obfuscate=False, minify=
     for q, points in report.questions:
         artifacts['questions'][q.__qualname__] = {'title': q.question_title(), 'tests': {} }
         suite = loader.loadTestsFromTestCase(q)
+        from unitgrade.framework import classmethod_dashboard
+
+        if 'setUpClass' in q.__dict__ and isinstance(q.__dict__['setUpClass'], classmethod_dashboard):
+            ikey = tuple( os.path.basename( q._artifact_file_for_setUpClass() )[:-5].split("-") )
+
+
+            artifacts['questions'][q.__qualname__]['tests'][ikey] = {'title': 'setUpClass',
+                                                                   'artifact_file': os.path.relpath(q._artifact_file_for_setUpClass(),
+                                                                                                    root_dir),
+                                                                   # t._artifact_file(),
+                                                                   'hints': None,
+                                                                   'coverage_files': q()._get_coverage_files(),
+                                                                   }
         for t in suite._tests:
             id = t.cache_id()
             cf = t._get_coverage_files()
@@ -78,30 +100,18 @@ def setup_grade_file_report(ReportClass, execute=False, obfuscate=False, minify=
                                                                    'hints': t._get_hints(),
                                                                    'coverage_files': cf
                                                                    }
+            a = 34
     s, _ = dict2picklestring(artifacts['questions'])
-    db.set('questions', s)
-
-    # I think it is best to put this into the report stuff.
-    # import pupdb
-    # report = Report2()
-    # Trash other artifact files except the main file.
+    db['questions'] = artifacts['questions'] # ('questions', s)
+    with open(report._artifact_file(), 'wb') as f:
+        pickle.dump(db, f)
 
-    for f in glob.glob(os.path.dirname(report._artifact_file()) + "/*.json"):
+    for f in glob.glob(os.path.dirname(report._artifact_file()) + "/*.json") + glob.glob(os.path.dirname(report._artifact_file()) + "/cache.db*"): # blow old artifact files. should probably also blow the test cache.
         if os.path.basename(f).startswith("main_config"):
             continue
         else:
             os.remove(f)
 
-
-    # import json
-    # js = json.dumps(artifacts['questions'])
-    # import pickle
-
-    # pickle.loads(pk)
-    # json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
-
-
-
     from unitgrade_private.hidden_gather_upload import gather_report_source_include
     sources = gather_report_source_include(report)
     known_hashes = [v for s in sources.values() for v in s['blake2b_file_hashes'].values() ]
@@ -167,10 +177,15 @@ def setup_grade_file_report(ReportClass, execute=False, obfuscate=False, minify=
         if bzip: extra.append("--bzip2")
         if minify:
             obs += " --replacement-length=20"
+        from pyminifier_bundled.__main__ import runpym
 
         cmd = f'pyminifier {obs} {" ".join(extra)} -o {output} {output}'
-        print(cmd)
-        os.system(cmd)
+        import shlex
+        # shlex.split(cmd)
+        sysargs = shlex.split(cmd) # cmd.split(" ")
+        runpym(sysargs[1:])
+        # print(cmd)
+        # os.system(cmd)
         time.sleep(0.2)
         with open(output, 'r') as f:
             sauce = f.read().splitlines()
diff --git a/src/unitgrade_private/hidden_gather_upload.py b/src/unitgrade_private/hidden_gather_upload.py
index 97f8a76704f2c71c5d6f6eb6be631067bca294dd..d4f4983a4c98bde119b6a609c22c5ca3d3c63328 100644
--- a/src/unitgrade_private/hidden_gather_upload.py
+++ b/src/unitgrade_private/hidden_gather_upload.py
@@ -1,20 +1,16 @@
 from unitgrade.evaluate import evaluate_report, python_code_str_id
-import lzma
-import base64
 import textwrap
-import hashlib
 import bz2
 import pickle
 import os
 import zipfile
 import io
-
+from unitgrade.utils import picklestring2dict, dict2picklestring, load_token, token_sep
 
 def bzwrite(json_str, token): # to get around obfuscation issues
     with getattr(bz2, 'open')(token, "wt") as f:
         f.write(json_str)
 
-
 def gather_imports(imp):
     resources = {}
     m = imp
@@ -158,20 +154,6 @@ def gather_upload_to_campusnet(report, output_dir=None, token_include_plaintext_
         print(">", token)
 
 
-
-def dict2picklestring(dd):
-    b = lzma.compress(pickle.dumps(dd))
-    b_hash = hashlib.blake2b(b).hexdigest()
-    return base64.b64encode(b).decode("utf-8"), b_hash
-
-def picklestring2dict(picklestr):
-    b = base64.b64decode(picklestr)
-    hash = hashlib.blake2b(b).hexdigest()
-    dictionary = pickle.loads(lzma.decompress(b))
-    return dictionary, hash
-
-
-token_sep = "-"*70 + " ..ooO0Ooo.. " + "-"*70
 def save_token(dictionary, plain_text, file_out):
     if plain_text is None:
         plain_text = ""
@@ -187,21 +169,7 @@ def save_token(dictionary, plain_text, file_out):
     with open(file_out, 'w') as f:
         f.write("\n".join(out))
 
-def load_token(file_in):
-    with open(file_in, 'r') as f:
-        s = f.read()
-    splt = s.split(token_sep)
-    data = splt[-1]
-    info = splt[-2]
-    head = token_sep.join(splt[:-2])
-    plain_text=head.strip()
-    hash, l1 = info.split(" ")
-    data = "".join( data.strip()[1:-1].splitlines() )
-    l1 = int(l1)
-    dictionary, b_hash = picklestring2dict(data)
-    assert len(data) == l1
-    assert b_hash == hash.strip()
-    return dictionary, plain_text
+
 
 
 def source_instantiate(name, report1_source, payload):
diff --git a/src/unitgrade_private/version.py b/src/unitgrade_private/version.py
index 6cb6f94cdd97cf290997d276d7d3c6a2dd9cf5ee..f47c8822cd2093b51319a6e6d2f8b8fe87259a30 100644
--- a/src/unitgrade_private/version.py
+++ b/src/unitgrade_private/version.py
@@ -1,2 +1,2 @@
-__version__ = "0.1.41"
+__version__ = "0.1.42"