From a6f84c4cdc7ec22372b5b209232f5df0fcbbd507 Mon Sep 17 00:00:00 2001
From: Tue Herlau <tuhe@dtu.dk>
Date: Mon, 13 Sep 2021 15:32:00 +0200
Subject: [PATCH] Begin removing the snipper dependency

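Summary of the changes in this first step:

- src/coursebox/material/snipper.py is commented out in full.
- core/info.py now builds its reference data with
  snipper.load_citations.get_bibtex and get_aux instead of get_references,
  and stores the result as a dict with 'bibtex' and 'commands' keys.
- setup.py bumps the version to 0.1.2 and drops pybtex, wexpect and pexpect
  from install_requires.
- A Python .gitignore is added.

As a rough sketch of the new structure (illustrative values only: the
'\aref2' command and the "(Assignment 2, %s)" format string are taken from
the example left in the comments in info.py, and the empty dicts stand in
for the parsed bibtex/aux data), class_information() now stores something
shaped like this in d['references']:

    references = {
        'bibtex': {},     # result of snipper.load_citations.get_bibtex(<pensum_bib>)
        'commands': [     # one entry per tex_command/tex_aux/tex_display triple in the config sheet
            {'command': '\\aref2',               # LaTeX command to rewrite
             'aux': {},                          # result of snipper.load_citations.get_aux(<tex_aux file>)
             'output': '(Assignment 2, %s)'},    # display format for the resolved label
        ],
    }
    print(references['commands'][0]['output'] % 'Problem 3')  # -> (Assignment 2, Problem 3)
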
---
 .gitignore                                    | 138 +++
 setup.py                                      |   4 +-
 .../core/__pycache__/info.cpython-38.pyc      | Bin 9310 -> 9428 bytes
 src/coursebox/core/info.py                    |  42 +-
 ...homepage_lectures_exercises.cpython-38.pyc | Bin 13394 -> 13393 bytes
 .../lecture_questions.cpython-38.pyc          | Bin 4085 -> 4084 bytes
 .../__pycache__/snipper.cpython-38.pyc        | Bin 12884 -> 12887 bytes
 src/coursebox/material/snipper.py             | 924 +++++++++---------
 8 files changed, 630 insertions(+), 478 deletions(-)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5391d87
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,138 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 9e5ae5f..5d1e12e 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 # beamer-slider
 setuptools.setup(
     name="coursebox",
-    version="0.1.1",
+    version="0.1.2",
     author="Tue Herlau",
     author_email="tuhe@dtu.dk",
     description="A course management system currently used at DTU",
@@ -30,5 +30,5 @@ setuptools.setup(
     package_dir={"": "src"},
     packages=setuptools.find_packages(where="src"),
     python_requires=">=3.8",
-    install_requires=['openpyxl', 'tika','xlwings','pybtex','langdetect','wexpect','pexpect','matplotlib','numpy','pycode_similar','jinjafy','beamer-slider','tinydb'],
+    install_requires=['numpy','pycode_similar','tika','openpyxl', 'xlwings','matplotlib','langdetect','jinjafy','beamer-slider','tinydb'],
 )
diff --git a/src/coursebox/core/__pycache__/info.cpython-38.pyc b/src/coursebox/core/__pycache__/info.cpython-38.pyc
index 7776621fd24f89ac5361a6babd38177221f59a1d..1d680dd6b8885df10e21e68905c1c7757f176b18 100644
GIT binary patch
delta 2033
zcmZ8iU2GIp6rMY~-EOzrc4_yw?QXX}&@Pk(+wv!CRVs*yLI@ycT3m;{({||2?s8{}
zWtq)B6cNP6)*FEkL$)RcQF+jf(Fdb1KA4d3WQds<c`!zJReWiD@SN$k5+}LeJ@-4`
zz4x5^H@_bI>80l5O-&vFzPkta%I|GkYVIW8EN#h>j-~5~{!ZFP{TGPjrvVzgAV>ik
zqG70mG(y{<4pGNZArbxJPtrgR^T!RJkz>`0=XXL*@QdD!)s-g687|gMWK=_81m>_v
zX@!ihkdqcU&sppD<XUy8ZH$mF`C0#Nm*t#PbCg&8r%9T}0-Fai(1j@o6hIOgwVcz2
zmSr*IqR2UMY@QFqTMEdvBBTLk_|3pJBJz8IagwfX4sIeu<3~br<G0}a3yM(e;DxY1
zn3c6tR32R+gKQdDwg*Awe}v}9kNl1BzRrt4SkA1f8*C8G1|>JEDNKfx&hqcVJIO`<
zcepq34)(+8t%fPZNKfY3toAkE8tG1me&Qrv5+m#baAQvEUl=SOlrbp!ht)SD&y%v&
zQfejwjhdbr*CJsL%_dW%v4}vsLW1d`(TETd=1Hk#N+|hgoOWLjN*icTO?}_+nj1jX
z69J8F^{!ieXw5rb5KfLvI7+RhwvylQnXP8?6XUmq%Gl9QH!e6z0n=C6U<Q^65f;gr
z3W!@wAKkPdnr_;k5_0WED;=PTd65W)Z%R-Fx_QlKx+`s_2YZ^XMUf?qI+(-(No=7_
zv>C<@+KM1548Q5d5y3kc5z2Jyf;fRwBvZl~of3=y*xMEe;C7qe9uX=*`qVNqLn(m{
z+bbZlOOX9=1bYqxI|A7rC^ecMvoRu+LPp388R2;mLi*f<qdX(#L^D+5BQ{_D0UR9>
zVbx)Vuxm<K6wkbC2$l9_0S0WH5KE0Eujz%jSAD-gmJa4JJ(Ui+gYH}w=`I|awnG@Z
zAzbwKJ>lKC?%iW2^EBiR!GQW{Y7=|ciM^2f+Tx{XoxQ**;5tE^vh#cf_7%5aO*L9m
zjn-77PpV8eT-ay%jm{J8ufw5x)CiDhVGU4Ou7WBTi!!^)8>6HB*N_s&_FEn~n@vx#
z{A@vI*MVej0Z2||PS%bwjTfTFV;t!v1n)Z7SMdroAM)R#mxH3^!FzE^J!L3!Rz2GC
z>>Mw}+RHMCGWa~KeP}zN9EGC$0Sv(u3^FgWW)nzLm?B37)(wt?$OfQd3uqHIg7*JH
zcpJEXkiP^_?9F6lT~DhyxS*PBsQFy7oXX}ia#kNrrfQFY)Ce)AGkHyub5yFw2E@9`
zQdKWx<#P##RbLZW4scqo{H#%!H4>ubs^#de`E|*|lu3mtxs0M0UE?_S$ztkIKE5hE
z5oh3B6U6m`l2IpBg(l-;(@JJ0p2_DX)u~x{aUk0Gq?%Qdamf1<%;^?FcFh7-Li;>I
zn%|3UBsciu*vOC$#3K<va*XXCLfFA>qv%uQ>R3Ln>1@Ak8;Y#yi71!4cBk>ZvK&RV
zAO-cDS|}(ina#^I4R5X;jLw#EkdF}N5VjzUA`}tuA!g$U=Me0jN}z??#TNO`U0vlc
zyNZn)2+twlxnsK!ZX&#hU>`dZtqOo7PN{4dTRjN>#ek~;e*}%{mYCHo2UzT5;H>)E
zu32J6V`q?YO)^C(DvZAt?{?h<Mt`4|<F9o4h=+t7PU40-32%0@tJUKnz=|%?3fRXV
z#0N-~ySwj`Jb%!Aj9li29Z`OyXShBO!`vnkztVGL-&wHjl}NbQcC>MISs6fTVrp8S
z&NCw?YYIflR7s@j>aqO_Rrbxf!(+W&O?RN35Dy(lkVA<uf3^3P&t6m97*<8N!<Rzs
me6Y{k7l)=7{xfdTE4rcXB;I;YzdPivb35FA&&zJV`}Dsrv;@om

delta 1916
zcmZ8hT})g>6uxKn?y@Yfu&}T!U6#L=_R`V<Efo`NL<BWP5@V^!Qo3E3yJcl}+0MNP
z#aXweG|@IFII(H0wh&`$(<V*Bi-`{!AM~LyX;SrJADZ}JVtw|B2lbo_MdD5FcV@ox
z%{gbzy)!?*^7v){Xhnrj!{<S$CvmZJ#UFw1SN5kNx^mU%ZsI{+y9lu$Z{~G8grizs
z&%=vatd2MEU1&o*!W+@nbJrW1(Q^L}D1{U1&(b?EwtmI;JHRO=%l8yE+$SefwmIpd
zOk0l^tsCD#e61@8T+6$pwyH@cVU6W=sdJdtL_1n7$vFCxn1mFZNgWk#sYzYBxhtey
zWMM7ukskEvYk=!?k(S%@0!bXFdkq|^Ys=|D@lzk$@|94~B$Fl5MG1|P5dm0U5%MC4
zb_2#XV7v(L7*N!csVq0VVu1W^%T5Sebko>n&)Xu3BhiR3Qq*btu~K0sO<`syO;Juh
z2dYV5L4o47IUgsXK+2+A8t70CUhpGUF+W6D1VnQEdil5>731i4Difz@6el=MkU&s1
zfp6i?`lG-gzz-@^)8`fwM32_I1u6AiO~;80NoNSM2r(vjQJ=Ci>Ho?WCOV4D63(7P
z7tJ7DWGNAIP0i&t%YU09&>lnz;k+6M9s*EjgJY0d|0&oGV5z@q6Yx--s58o(8lNTc
zX?3Y?9v&-K=!M8-B#L@EW!d5Y4NJ}JY{nD`Y=l&a&{4Rou7=ubKcaO`w@S~Lc04&F
zW-|}e!%&N%2c14eL41muNqn~-(pTZ@{AJPghWY_0fW{l8Z`!jALdpuscr>i>Cj3Ku
zyfLhiy}(Ygygy$l{j!p2(#xCs30wNLY!z=k$#yc*yILuIYXJC%VZ^U(H%4?}#5P{R
z{Y$QVm7}O4g^gobz;Ni^y@W|H&>kl=Ca?oIYhMDyy$-iGtgTk_eQU%R{TlCZie|!x
zg?5CgpbljBzo?B5=gXu|mf;x)*fla>2QRQ?CJxE~ub*aFCIef1E%600jE-GlZPOtG
zw6tGaW@r7jwpzEQVZdtKe_6g<mSeq}zEik6Cf=96)et|xJJ%RLNTGQHmT?HnCGX80
z-p*}rm$R8}+%gt}YO~uRdbWuk-22m}9p>&O5*<dao~*OaBVp|V!}dK@^=_$pw^Y45
zRnm)x_Ne-1*P9Jf_~oCJAsCgS6iv~SOC=T-65^UV5IMH@GoskwP|=r2r{ibD%xumQ
zpCc(QA;ffZK9M;tEVU9Djb0_1uBrbWU9pVZ-%4-%pa4Zb{g~T_+=}^fX4aa`#3xb{
z#j>uRBfVX@*@<*&QcP2P-wvfeYtHRhX|RzILDzu6M2wQ2C!iB%z$P(<;{~>7AXdo}
z=i)QT_?$Y_<b@lmujxq7Idnc@2r<{-P!B<`_zI`uHgRq-Gn28zkTbT;M8-10>UvY(
zFrA*FYax}31*~i;movpdyp9rfY9?!mo3y?_KxGKZFWx2CM_>?qOz?^7ZEhN^7T0KU
zgW!1r>a}==;0uD62%J{GN24zhV(d&x9Hv<-0p%;|*di<D&XN&x>c{3z&u_?Ei)y3!
zwVHD9K@@_l3Vh51)rb)_)v^!nsg;&rVOIT99a4qX=io!tq6by3R9C;W_La<HfZI@1
z!M4{!k}@4bFg(uv<}^htD%sXtcAtE7e~7#4R$Ei}F3H%)Fn$ssmhN8zMzD8}+HTb*
miE=taT@?2S?y5_>+o4t6-(6Q3^}4+-Z_wxWz3Q#=9{(3Ap3YwY

diff --git a/src/coursebox/core/info.py b/src/coursebox/core/info.py
index 773f4c0..ba913e1 100644
--- a/src/coursebox/core/info.py
+++ b/src/coursebox/core/info.py
@@ -76,13 +76,6 @@ def get_enrolled_students():
         students2[id] = s2
     return students2
 
-# def get_file_dir_or_template(filepath, templatepath):
-#     dn =  os.path.dirname(filepath)
-#
-#     thtools.ensure_dir_exists(  )
-#     if not os.path.exists(os.path.dirname):
-#         shutil.copyfile(templatepath, filepath)
-
 def get_instructors():
     paths = get_paths()
     instructors = xlsx_to_dicts(paths['information.xlsx'], sheet='instructors')
@@ -133,6 +126,7 @@ def lectures(info, pensum=None):
                 r.append( {'url': url, 'shorturl': shorturl, 'description': description})
         lecture["resources"] = r
         if pensum is not None:
+
             rd, html = lecture['reading'], ""
             while True:
                 i = rd.find("\\cite")
@@ -199,8 +193,6 @@ def get_forum(paths):
         k = k.split(" ")[0]
         for v in a[kk]:
             dd[k.lower()].append(v.split(",")[i])
-    # list_dict2dict_list()
-    # dd = dict_(dd)
 
     n = len(list(dd.values())[0])
     d2 = []
@@ -251,19 +243,41 @@ def class_information():
     del gi['key']
     del gi['value']
 
-    from snipper.load_citations import get_references
+    from snipper.load_citations import get_bibtex, get_aux
     if "pensum_bib" in gi:
-        refs, nrefs = get_references(paths['02450public'] + "/" + gi['pensum_bib'], gi)
-        d['references'], d['new_references'] = refs, nrefs
+        bibtex = get_bibtex(paths['02450public'] + "/" + gi['pensum_bib'])
+        # refs, nrefs = get_references(paths['02450public'] + "/" + gi['pensum_bib'], gi)
+        # d['references'], d['new_references'] = refs, nrefs
+        cmds = []
+        ls = lambda x: x if isinstance(x, list) else [x]
+        if 'tex_command' in gi:
+            for cmd, aux, display in zip(ls(gi['tex_command']), ls(gi['tex_aux']), ls(gi['tex_display'])):
+                cm = dict(command=cmd, aux=get_aux(paths['02450public'] + "/"+aux), output=display)
+                cmds.append(cm)
+
+                # ax = parse_aux(aux, bibtex=gi['bibtex'])
+                # for k in ax:
+                #     ax[k]['pyref'] = display % (ax[k]['nicelabel'],)
+                # newref[cmd] = ax
+        d['references'] = dict(bibtex=bibtex, commands=cmds)
+
+        # references = dict(bibtex=bibtex,
+        #                   # aux=auxfile,
+        #                   commands=[dict(command='\\aref2', output="(Assignment 2, %s)", aux=auxfile),
+        #                             dict(command='\\nref', output="\cite[%s]{herlau}", aux=auxfile),
+        #                             ])
+
+
     else:
-        d['references'], d['new_references'] = None, None
+        print("[info]", "No bibtex rereferences specified. Check configuration file. ")
+        d['references'] = dict(commands=[], bibtex={}) #['bibtex'] = None
     d.update(gi)
     # set first day of class if CE
     if continuing_education_mode:
         ice = xlsx_to_dicts(paths['information.xlsx'], sheet='ce', as_dict_list=True)
         d.update(ice)
 
-    d['lectures'], d['references'] = lectures(info=d, pensum=d['references'])
+    d['lectures'], d['references']['bibtex'] = lectures(info=d, pensum=d['references']['bibtex'])
 
     d['first_day_of_class'] = first_day_of_class(info=d)
     d['day_of_week_str'] = d['first_day_of_class'].strftime('%A')
diff --git a/src/coursebox/material/__pycache__/homepage_lectures_exercises.cpython-38.pyc b/src/coursebox/material/__pycache__/homepage_lectures_exercises.cpython-38.pyc
index 7fbf6d0fa20cc775ab5f264daaea2e53dae055f9..fcdc53b8ce3e88021bd3cba3dda0c41923e0c4a1 100644
GIT binary patch
delta 41
scmcbVaWR88l$V!_0SKBKj1tdm<o#>J%Tt__nUY$h2c|Yl7+Wy{02vJpT>t<8

delta 42
scmcbZaVdj0l$V!_0SNxyGfX_Yk@v3=A8&C^W=d+29)#K~ZfwN}06-QFr~m)}

diff --git a/src/coursebox/material/__pycache__/lecture_questions.cpython-38.pyc b/src/coursebox/material/__pycache__/lecture_questions.cpython-38.pyc
index e66197689c863bc31c59715e6582b5a2372276f2..f7895956a0a0421081a54b95bdef8362f7eb4490 100644
GIT binary patch
delta 40
rcmew=|3#iRl$V!_0SKBKj1q5d<Spmt<tfg|Oi3-$15=YH@h1ZS^@t49

delta 41
rcmew&|5cthl$V!_0SICw4HIu|<Spmt<1Nm~Oi3-$gHV$v@+SiT^ZN|A

diff --git a/src/coursebox/material/__pycache__/snipper.cpython-38.pyc b/src/coursebox/material/__pycache__/snipper.cpython-38.pyc
index 01866b2826347606df13268f4052d9d722791851..86e3289f6a18eab32268f984ef92621e80849da4 100644
GIT binary patch
delta 1247
zcmZXU+iO!n5XPOPrYc6GZ4+sd)+R)Iib-0p1;L64BG#f0T1|?Fn2V-0Y2BpScx$~Y
z-WjbI6pF1C5pNv4fKOJ?M_&rU0sR-$2L;gw_uG_IF%LgxW@qPoGrODd#K(kvi`_nB
ziu~!%m&SfPvwxpWreSs5AjcLN6x#tC(9ShZmoYXcO4D{r{{e|O2P$4tls{EOD8tQ_
zEi}My{FTP}$~4&`GDj<CeNGiSoOX)K-EEZOjB{n}E|i-Fpit3L?sT}F{N7op?Si?d
zl#?kXs+=tAuCT2K!M%Ld6>QAG%mZaW1F#R+4^#kE5}IAig5Ag8Ty>!W%mctdU@dS6
zI1C&C`T?aa!>i{t(<!4cuf;O^m<*eibV9_jz7><<&($kzC*Y>o?T%6p?{_y;PwFaZ
zD2mhhwcBGWO5{&&FZJ;+w~zX{#^a<Tyv);1Y+UfPT0Cjggg`m*)37H;mF4d>pXnNp
z*S6D5W6}J#M7Q{B-Qo)9Q^}?!9ycw?<b=jnUrTUMu4yFhASI>jHq9!Q!Y!%QY{fa<
z;a9%ZG{irBmu$nxxWX6xi)qw&<8LF^4(y(o=oM+3)>v<!80H0m4K%_cu!Kg8(ZF@`
z+{M=U07)E}+7VCZlj1%Ck9hmSv(>7{C-7Bse|PwM;U>o@R@9kVU_IoHdhgt*G__ep
zo%awP=5zJl>c=owNtjlVOo?Z3p8`*KtX`)V?9(<ojw5gexC#vNajk(O{8)2Q#CWH5
zSZ6|vS=O6RHH)(x3a+HP+!<_H(1-Lj5^`iETbx96vSyM$1iR{Rc{Phrt*J}*Uu)6O
zS~|)@q4hQ$1<vu8P?cBZJEi}CB4xSm-uyr3T^A0_RoC8+9qaH3uh_@i!u~4EL%aYU
z0uv=Khdmp|;4?4)oC5GX#Z6!cZ~%BZX1T8C@<P|e1$fq2wxHgS{EX`N9{&ut)~kkb
zAtGgooxQt?Zv~>&ahTLq{LAR)Oyhcb%P$+5#(6_i&2ncho9^jJiqM|!SVEWAAeK*e
zXLCZGtCEa6XqI=Tvz@V2L0D0<gKsx23Jt(i+Epu)+STnC!K&(BbvDjFns)pFu~aE3

delta 1244
zcmZXU-)mD@5XYUQn^rLzO`AxQv^F8yTTIgGE?W?`vIrv9vVy;w)`ljz(ezj3w6Tr5
zw!7<(^|!^T*ae~5s-S39E>gj#R?tTuS`aSizn~&M6n*e~PfFsldH66hXU^R3%sD4-
zk_$=uKD&LzGWi=>+0gmviT(3xiW*kOI63yppg0KFfc;$MavO7N+NkWX^zV^~bI>_M
zQGV-;QV%zlH_-^s2Fi_#<!Q3D${ekfV{0nd;j&X)?rtHSdtF<rQz%ykpitgY?sB+k
ze&xzlcfsr~<n$C0RZcI*+!0#_!J|Cp4%G{o{Xhv&2V{X`fD@>Y(ClIr>_PtMt_csp
z%mK%N?ZEHAAHWG<7*N`Jc-y*0I&I|EHCa~wCBvpAofL7b@55yHUFFZVlW=wRc-kn#
zgPyv|qOO95ytod2?(y3468WviM}z#u<ELS+^1A2*|KvSDSB*>FW{dX-YC@o#_!-#6
zQT6iYs&_QT3)Kf`+}O1K1<}9!p=Pra`c$%MNhC~*t}kh9_BVyD$u*6{4W#JGZqcmb
zN4N#Gnk_%48~n__jV5@}f5kS5j8VQE*i3hf*+2`q4`KJxL`S7<T065tVv;umchMA!
z;1-%P?gp=u_ZGIU2T0=h5ABKcfG+ML@PJ!4o~u+nPQzEtec$2NjlVhWU`3s&1=f9T
zul22sN>f{v*LfS^Nxo3)t9%Ia7YWlU^ptoE_YpA7bG0#g%6@H+<8K810T{T(C$&0i
z<!Q}Lt;S2O-MSKD%#!Rts!^QfaA+&t;;vBBhC!tNDj`Q!u*E4vi#3z{I@Dc*%d1(0
zYE50b|FzZ@-cEn=&G1fJ2MV0$_u&ek%6Ccs9z{#~GueUvoOewmxK>^J0Cw!aCwwBy
z2P1(B%tJf{?gL9DM<d?dv+%D1Bfx0@&r^&86MzH2(=kh9vHk%Oi;0Wytet&1^@ikU
zRKK@*G16SC8lIF1(UN35dpQ4AAX*)VN!|IsjA8Dr-$`>kQ_nQdyBeytI1`zy=-1<!
zp>QH2^l-W_l@S(fRC;j%&C;%PUsq=;C#;CK@kGO>a8bDGqbOWmjVUCm)>T{cyx7q3
E4Sf+N5&!@I

diff --git a/src/coursebox/material/snipper.py b/src/coursebox/material/snipper.py
index 05fd862..1ec3d0b 100644
--- a/src/coursebox/material/snipper.py
+++ b/src/coursebox/material/snipper.py
@@ -1,462 +1,462 @@
-from snipper.load_citations import find_tex_cite
-import os
-import functools
-from jinjafy import execute_command
-import textwrap
-import re
-
-COMMENT = '"""'
-def indent(l):
-    v = len(l) - len(l.lstrip())
-    return l[:v]
-
-def fix_r(lines):
-    for i,l in enumerate(lines):
-        if "#!r" in l:
-            lines[i] = indent(l) + l[l.find("#!r") + 3:].lstrip()
-    return lines
-
-def gcoms(s):
-    coms = []
-    while True:
-        i = s.find(COMMENT)
-        if i >= 0:
-            j = s.find(COMMENT, i+len(COMMENT))+3
-        else:
-            break
-        if j < 0:
-            raise Exception("comment tag not closed")
-        coms.append(s[i:j])
-        s = s[:i] + s[j:]
-        if len(coms) > 10:
-            print("long comments in file", i)
-    return coms, s
-
-def strip_tag(lines, tag):
-    lines2 = []
-    for l in lines:
-        dx = l.find(tag)
-        if dx > 0:
-            l = l[:dx]
-            if len(l.strip()) == 0:
-                l = None
-        if l is not None:
-            lines2.append(l)
-    return lines2
-
-def block_process(lines, tag, block_fun):
-    i = 0
-    didfind = False
-    lines2 = []
-    block_out = []
-    cutout = []
-    while i < len(lines):
-        l = lines[i]
-        dx = l.find(tag)
-        if dx >= 0:
-            if l.find(tag, dx+1) > 0:
-                j = i
-            else:
-                for j in range(i + 1, 10000):
-                    if j >= len(lines):
-                        print("\n".join(lines))
-                        print("very bad end-line j while fixing tag", tag)
-                        raise Exception("Bad line while fixing", tag)
-                    if lines[j].find(tag) >= 0:
-                        break
-
-            pbody = lines[i:j+1]
-            if i == j:
-                start_extra = lines[j][dx:lines[j].rfind(tag)].strip()
-                end_extra = lines[j][lines[j].rfind(tag) + len(tag):].strip()
-            else:
-                start_extra = lines[i][dx:].strip()
-                end_extra = lines[j][lines[j].rfind(tag) + len(tag):].strip()
-
-            cutout.append(pbody)
-            tmp_ = start_extra.split("=")
-            arg = None if len(tmp_) <= 1 else tmp_[1].split(" ")[0]
-            start_extra = ' '.join(start_extra.split(" ")[1:] )
-
-            pbody[0] = pbody[0][:dx]
-            if j > i:
-                pbody[-1] = pbody[-1][:pbody[-1].find(tag)]
-
-            nlines, extra = block_fun(lines=pbody, start_extra=start_extra, end_extra=end_extra, art=arg, head=lines[:i], tail=lines[j+1:])
-            lines2 += nlines
-            block_out.append(extra)
-            i = j+1
-            didfind = True
-            if "!b" in end_extra:
-                assert(False)
-        else:
-            lines2.append(l)
-            i += 1
-
-    return lines2, didfind, block_out, cutout
-
-
-def rem_nonprintable_ctrl_chars(txt):
-    """Remove non_printable ascii control characters """
-    #Removes the ascii escape chars
-    try:
-        txt = re.sub(r'[^\x20-\x7E|\x09-\x0A]','', txt)
-        # remove non-ascii characters
-        txt = repr(txt).decode('unicode_escape').encode('ascii','ignore')[1:-1]
-    except Exception as exception:
-        print(exception)
-    return txt
-
-
-def run_i(lines, file, output):
-    extra = dict(python=None, output=output, evaluated_lines=0)
-    def block_fun(lines, start_extra, end_extra, art, head="", tail="", output=None, extra=None):
-        outf = output + ("_" + art if art is not None and len(art) > 0 else "") + ".shell"
-        lines = full_strip(lines)
-        s = "\n".join(lines)
-        s.replace("...", "..") # passive-aggressively truncate ... because of #issues.
-        lines = textwrap.dedent(s).strip().splitlines()
-
-        if extra['python'] is None:
-            # import thtools
-
-            if os.name == 'nt':
-                import wexpect as we
-            else:
-                import pexpect as we
-            an = we.spawn("python", encoding="utf-8", timeout=20)
-            an.expect([">>>"])
-            extra['python'] = an
-
-        analyzer = extra['python']
-        def rsession(analyzer, lines):
-            l2 = []
-            for i, l in enumerate(lines):
-                l2.append(l)
-                if l.startswith(" ") and i < len(lines)-1 and not lines[i+1].startswith(" "):
-                    if not lines[i+1].strip().startswith("else:") and not lines[i+1].strip().startswith("elif") :
-                        l2.append("\n")
-
-            lines = l2
-            alines = []
-
-            # indented = False
-            in_dot_mode = False
-            if len(lines[-1]) > 0 and (lines[-1].startswith(" ") or lines[-1].startswith("\t")):
-                lines += [""]
-
-            for i, word in enumerate(lines):
-                analyzer.sendline(word)
-                before = ""
-                while True:
-                    analyzer.expect_exact([">>>", "..."])
-                    before += analyzer.before
-                    if analyzer.before.endswith("\n"):
-                        break
-                    else:
-                        before += analyzer.after
-
-                dotmode = analyzer.after == "..."
-                if 'dir(s)' in word:
-                    pass
-                if 'help(s.find)' in word:
-                    pass
-                if dotmode:
-                    # alines.append("..." + word)
-                    alines.append(">>>" + analyzer.before.rstrip() if not in_dot_mode else "..." + analyzer.before.rstrip())
-                    in_dot_mode = True
-                    # if i < len(lines) - 1 and not lines[i + 1].startswith(" "):
-                    #     analyzer.sendline("\n")  # going out of indentation mode .
-                    #     analyzer.expect_exact([">>>", "..."])
-                    #     alines.append("..." + analyzer.after.rstrip())
-                    #     pass
-                else:
-                    alines.append( ("..." if in_dot_mode else ">>>") + analyzer.before.rstrip())
-                    in_dot_mode = False
-            return alines
-
-        for l in (head[extra['evaluated_lines']:] + ["\n"]):
-            analyzer.sendline(l)
-            analyzer.expect_exact([">>>", "..."])
-
-
-        alines = rsession(analyzer, lines)
-        extra['evaluated_lines'] += len(head) + len(lines)
-        lines = alines
-        return lines, [outf, lines]
-    try:
-        a,b,c,_ = block_process(lines, tag="#!i", block_fun=functools.partial(block_fun, output=output, extra=extra))
-        if extra['python'] is not None:
-            extra['python'].close()
-
-        if len(c)>0:
-            kvs= { v[0] for v in c}
-            for outf in kvs:
-                out = "\n".join( ["\n".join(v[1]) for v in c if v[0] == outf] )
-                out = out.replace("\r", "")
-
-                with open(outf, 'w') as f:
-                    f.write(out)
-
-    except Exception as e:
-        print("lines are")
-        print("\n".join(lines))
-        print("Bad thing in #!i command in file", file)
-        raise e
-    return lines
-
-def save_s(lines, file, output, include_path_base=None): # save file snips to disk
-    def block_fun(lines, start_extra, end_extra, art, output, **kwargs):
-        outf = output + ("_" + art if art is not None and len(art) > 0 else "") + ".py"
-        lines = full_strip(lines)
-        return lines, [outf, lines]
-    try:
-        a,b,c,_ = block_process(lines, tag="#!s", block_fun=functools.partial(block_fun, output=output))
-
-        if len(c)>0:
-            kvs= { v[0] for v in c}
-            for outf in kvs:
-
-                out = "\n".join([f"# {include_path_base}"]  + ["\n".join(v[1]) for v in c if v[0] == outf] )
-
-                with open(outf, 'w') as f:
-                    f.write(out)
-
-    except Exception as e:
-        print("lines are")
-        print("\n".join(lines))
-        print("Bad thing in #!s command in file", file)
-        raise e
-    return lines
-
-def run_o(lines, file, output):
-    def block_fun(lines, start_extra, end_extra, art, output, **kwargs):
-        id = indent(lines[0])
-        outf = output + ("_" + art if art is not None else "") + ".txt"
-        l2 = []
-        l2 += [id + "import sys", id + f"sys.stdout = open('{outf}', 'w')"]
-        l2 += lines
-        # l2 += [indent(lines[-1]) + "sys.stdout.close()"]
-        l2 += [indent(lines[-1]) + "sys.stdout = sys.__stdout__"]
-        return l2, None
-    try:
-        lines2, didfind, extra, _ = block_process(lines, tag="#!o", block_fun=functools.partial(block_fun, output=output) )
-    except Exception as e:
-        print("Bad file: ", file)
-        print("I was cutting the #!o tag")
-        print("\n".join( lines) )
-        raise(e)
-
-    if didfind:
-        fp, ex = os.path.splitext(file)
-        file_run = fp + "_RUN_OUTPUT_CAPTURE" +ex
-        if os.path.exists(file_run):
-            print("file found mumble...")
-        else:
-            with open(file_run, 'w', encoding="utf-8") as f:
-                f.write("\n".join(lines2) )
-            cmd = "python " + file_run
-
-            s,ok = execute_command(cmd.split(), shell=True)
-            print(s)
-            os.remove(file_run)
-
-def fix_f(lines, debug):
-    lines2 = []
-    i = 0
-    while i < len(lines):
-        l = lines[i]
-        dx = l.find("#!f")
-        if dx >= 0:
-            l_head = l[dx+3:].strip()
-            l = l[:dx]
-            lines2.append(l)
-            id = indent(lines[i+1])
-            for j in range(i+1, 10000):
-                jid = len( indent(lines[j]) )
-                if  j+1 == len(lines) or ( jid < len(id) and len(lines[j].strip() ) > 0):
-                    break
-
-            if len(lines[j-1].strip()) == 0:
-                j = j - 1
-            funbody = "\n".join( lines[i+1:j] )
-            if i == j:
-                raise Exception("Empty function body")
-            i = j
-            comments, funrem = gcoms(funbody)
-            comments = [id + c for c in comments]
-            if len(comments) > 0:
-                lines2 += comments[0].split("\n")
-            lines2 += [id+"#!b"]
-            lines2 += (id+funrem.strip()).split("\n")
-            errm = l_head if len(l_head) > 0 else "Implement function body"
-            lines2 += [f'{id}#!b {errm}']
-
-        else:
-            lines2.append(l)
-            i += 1
-    return lines2
-
-def fix_b2(lines):
-    stats = {'n': 0}
-    def block_fun(lines, start_extra, end_extra, art, stats=None, **kwargs):
-        id = indent(lines[0])
-        lines = lines[1:] if len(lines[0].strip()) == 0 else lines
-        lines = lines[:-1] if len(lines[-1].strip()) == 0 else lines
-        cc = len(lines)
-        ee = end_extra.strip()
-        if len(ee) >= 2 and ee[0] == '"':
-            ee = ee[1:-1]
-        start_extra = start_extra.strip()
-        l2 = ([id+start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.",
-                                         id+f'raise NotImplementedError("{ee}")']
-        # if "\n".join(l2).find("l=l")>0:
-        #     a = 2342342
-        stats['n'] += cc
-        return l2, cc
-    lines2, _, _, cutout = block_process(lines, tag="#!b", block_fun=functools.partial(block_fun, stats=stats))
-    return lines2, stats['n'], cutout
-
-
-def fix_references(lines, info, strict=True):
-    for cmd in info['new_references']:
-        lines = fix_single_reference(lines, cmd, info['new_references'][cmd], strict=strict)
-    return lines
-
-def fix_single_reference(lines, cmd, aux, strict=True):
-    references = aux
-    s = "\n".join(lines)
-    i = 0
-    while True:
-        (i, j), reference, txt = find_tex_cite(s, start=i, key=cmd)
-        if i < 0:
-            break
-        if reference not in references:
-            er = "cref label not found for label: " + reference
-            if strict:
-                raise IndexError(er)
-            else:
-                print(er)
-                continue
-        r = references[reference]
-        rtxt = r['pyref']
-        s = s[:i] + rtxt + s[j + 1:]
-        i = i + len(rtxt)
-        print(cmd, rtxt)
-
-    lines = s.splitlines(keepends=False)
-    return lines
-
-
-def fix_cite(lines, info, strict=True):
-    lines = fix_references(lines, info, strict=strict)
-
-    s = "\n".join(lines)
-    i = 0
-    all_refs = []
-    while True:
-        (i, j), reference, txt = find_tex_cite(s, start=i, key="\\cite")
-        if i < 0:
-            break
-        if reference not in info['references']:
-            raise IndexError("no such reference: " + reference)
-        ref = info['references'][reference]
-        label = ref['label']
-        rtxt = f"({label}" + (", "+txt if txt is not None else "") + ")"
-        r = ref['plain']
-        if r not in all_refs:
-            all_refs.append(r)
-        s = s[:i] + rtxt + s[j+1:]
-        i = i + len(rtxt)
-
-    cpr = "{{copyright}}"
-    if not s.startswith(COMMENT):
-        s = f"{COMMENT}\n{cpr}\n{COMMENT}\n" + s
-    if len(all_refs) > 0:
-        i = s.find(COMMENT, s.find(COMMENT)+1)
-        all_refs = ["  " + r for r in all_refs]
-        s = s[:i] + "\nReferences:\n" + "\n".join(all_refs) + "\n" + s[i:]
-
-    s = s.replace(cpr, info['code_copyright'])
-    return s
-
-def full_strip(lines, tags=None):
-    if tags is None:
-        tags = ["#!s", "#!o", "#!f", "#!b"]
-    for t in tags:
-        lines = strip_tag(lines, t)
-    return lines
-
-def censor_file(file, info, paths, run_files=True, run_out_dirs=None, cut_files=True, solution_list=None,
-                censor_files=True,
-                include_path_base=None,
-                strict=True):
-    dbug = False
-    with open(file, 'r', encoding='utf8') as f:
-        s = f.read()
-        s = s.lstrip()
-        lines = s.split("\n")
-        for k, l in enumerate(lines):
-            if l.find(" # !") > 0:
-                print(f"{file}:{k}> bad snipper tag, fixing")
-            lines[k] = l.replace("# !", "#!")
-
-        try:
-            s = fix_cite(lines, info, strict=strict)
-            lines = s.split("\n")
-        except IndexError as e:
-            print(e)
-            print("Fuckup in file, cite/reference tag not found!>", file)
-            raise e
-
-        if run_files or cut_files:
-            ofiles = []
-            for rod in run_out_dirs:
-                if not os.path.isdir(rod):
-                    os.mkdir(rod)
-                ofiles.append(os.path.join(rod, os.path.basename(file).split(".")[0]) )
-            ofiles[0] = ofiles[0].replace("\\", "/")
-
-            if run_files:
-                run_o(lines, file=file, output=ofiles[0])
-                run_i(lines, file=file, output=ofiles[0])
-            if cut_files:
-                save_s(lines, file=file, output=ofiles[0], include_path_base=include_path_base)  # save file snips to disk
-        lines = full_strip(lines, ["#!s", "#!o", '#!i'])
-
-        # lines = fix_c(lines)
-        if censor_files:
-            lines = fix_f(lines, dbug)
-            lines, nB, cut = fix_b2(lines)
-        else:
-            nB = 0
-        lines = fix_r(lines)
-
-        if censor_files and len(cut) > 0 and solution_list is not None:
-            fname = file.__str__()
-            i = fname.find("irlc")
-            wk = fname[i+5:fname.find("\\", i+6)]
-            sp = paths['02450students'] +"/solutions/"
-            if not os.path.exists(sp):
-                os.mkdir(sp)
-            sp = sp + wk
-            if not os.path.exists(sp):
-                os.mkdir(sp)
-
-            stext = ["\n".join(lines) for lines in cut]
-            for i,sol in enumerate(stext):
-                sout = sp + f"/{os.path.basename(fname)[:-3]}_TODO_{i+1}.py"
-                wsol = any([True for s in solution_list if os.path.basename(sout).startswith(s)])
-                print(sout, "(published)" if wsol else "")
-                if wsol:
-                    with open(sout, "w") as f:
-                        f.write(sol)
-
-        if len(lines[-1])>0:
-            lines.append("")
-        s2 = "\n".join(lines)
-
-    with open(file, 'w', encoding='utf-8') as f:
-        f.write(s2)
-    return nB
-# lines: 294, 399, 420, 270
\ No newline at end of file
+# from snipper.load_citations import find_tex_cite
+# import os
+# import functools
+# from jinjafy import execute_command
+# import textwrap
+# import re
+#
+# COMMENT = '"""'
+# def indent(l):
+#     v = len(l) - len(l.lstrip())
+#     return l[:v]
+#
+# def fix_r(lines):
+#     for i,l in enumerate(lines):
+#         if "#!r" in l:
+#             lines[i] = indent(l) + l[l.find("#!r") + 3:].lstrip()
+#     return lines
+#
+# def gcoms(s):
+#     coms = []
+#     while True:
+#         i = s.find(COMMENT)
+#         if i >= 0:
+#             j = s.find(COMMENT, i+len(COMMENT))+3
+#         else:
+#             break
+#         if j < 0:
+#             raise Exception("comment tag not closed")
+#         coms.append(s[i:j])
+#         s = s[:i] + s[j:]
+#         if len(coms) > 10:
+#             print("long comments in file", i)
+#     return coms, s
+#
+# def strip_tag(lines, tag):
+#     lines2 = []
+#     for l in lines:
+#         dx = l.find(tag)
+#         if dx > 0:
+#             l = l[:dx]
+#             if len(l.strip()) == 0:
+#                 l = None
+#         if l is not None:
+#             lines2.append(l)
+#     return lines2
+#
+# def block_process(lines, tag, block_fun):
+#     i = 0
+#     didfind = False
+#     lines2 = []
+#     block_out = []
+#     cutout = []
+#     while i < len(lines):
+#         l = lines[i]
+#         dx = l.find(tag)
+#         if dx >= 0:
+#             if l.find(tag, dx+1) > 0:
+#                 j = i
+#             else:
+#                 for j in range(i + 1, 10000):
+#                     if j >= len(lines):
+#                         print("\n".join(lines))
+#                         print("very bad end-line j while fixing tag", tag)
+#                         raise Exception("Bad line while fixing", tag)
+#                     if lines[j].find(tag) >= 0:
+#                         break
+#
+#             pbody = lines[i:j+1]
+#             if i == j:
+#                 start_extra = lines[j][dx:lines[j].rfind(tag)].strip()
+#                 end_extra = lines[j][lines[j].rfind(tag) + len(tag):].strip()
+#             else:
+#                 start_extra = lines[i][dx:].strip()
+#                 end_extra = lines[j][lines[j].rfind(tag) + len(tag):].strip()
+#
+#             cutout.append(pbody)
+#             tmp_ = start_extra.split("=")
+#             arg = None if len(tmp_) <= 1 else tmp_[1].split(" ")[0]
+#             start_extra = ' '.join(start_extra.split(" ")[1:] )
+#
+#             pbody[0] = pbody[0][:dx]
+#             if j > i:
+#                 pbody[-1] = pbody[-1][:pbody[-1].find(tag)]
+#
+#             nlines, extra = block_fun(lines=pbody, start_extra=start_extra, end_extra=end_extra, art=arg, head=lines[:i], tail=lines[j+1:])
+#             lines2 += nlines
+#             block_out.append(extra)
+#             i = j+1
+#             didfind = True
+#             if "!b" in end_extra:
+#                 assert(False)
+#         else:
+#             lines2.append(l)
+#             i += 1
+#
+#     return lines2, didfind, block_out, cutout
+#
+#
+# def rem_nonprintable_ctrl_chars(txt):
+#     """Remove non_printable ascii control characters """
+#     #Removes the ascii escape chars
+#     try:
+#         txt = re.sub(r'[^\x20-\x7E|\x09-\x0A]','', txt)
+#         # remove non-ascii characters
+#         txt = repr(txt).decode('unicode_escape').encode('ascii','ignore')[1:-1]
+#     except Exception as exception:
+#         print(exception)
+#     return txt
+#
+#
+# def run_i(lines, file, output):
+#     extra = dict(python=None, output=output, evaluated_lines=0)
+#     def block_fun(lines, start_extra, end_extra, art, head="", tail="", output=None, extra=None):
+#         outf = output + ("_" + art if art is not None and len(art) > 0 else "") + ".shell"
+#         lines = full_strip(lines)
+#         s = "\n".join(lines)
+#         s.replace("...", "..") # passive-aggressively truncate ... because of #issues.
+#         lines = textwrap.dedent(s).strip().splitlines()
+#
+#         if extra['python'] is None:
+#             # import thtools
+#
+#             if os.name == 'nt':
+#                 import wexpect as we
+#             else:
+#                 import pexpect as we
+#             an = we.spawn("python", encoding="utf-8", timeout=20)
+#             an.expect([">>>"])
+#             extra['python'] = an
+#
+#         analyzer = extra['python']
+#         def rsession(analyzer, lines):
+#             l2 = []
+#             for i, l in enumerate(lines):
+#                 l2.append(l)
+#                 if l.startswith(" ") and i < len(lines)-1 and not lines[i+1].startswith(" "):
+#                     if not lines[i+1].strip().startswith("else:") and not lines[i+1].strip().startswith("elif") :
+#                         l2.append("\n")
+#
+#             lines = l2
+#             alines = []
+#
+#             # indented = False
+#             in_dot_mode = False
+#             if len(lines[-1]) > 0 and (lines[-1].startswith(" ") or lines[-1].startswith("\t")):
+#                 lines += [""]
+#
+#             for i, word in enumerate(lines):
+#                 analyzer.sendline(word)
+#                 before = ""
+#                 while True:
+#                     analyzer.expect_exact([">>>", "..."])
+#                     before += analyzer.before
+#                     if analyzer.before.endswith("\n"):
+#                         break
+#                     else:
+#                         before += analyzer.after
+#
+#                 dotmode = analyzer.after == "..."
+#                 if 'dir(s)' in word:
+#                     pass
+#                 if 'help(s.find)' in word:
+#                     pass
+#                 if dotmode:
+#                     # alines.append("..." + word)
+#                     alines.append(">>>" + analyzer.before.rstrip() if not in_dot_mode else "..." + analyzer.before.rstrip())
+#                     in_dot_mode = True
+#                     # if i < len(lines) - 1 and not lines[i + 1].startswith(" "):
+#                     #     analyzer.sendline("\n")  # going out of indentation mode .
+#                     #     analyzer.expect_exact([">>>", "..."])
+#                     #     alines.append("..." + analyzer.after.rstrip())
+#                     #     pass
+#                 else:
+#                     alines.append( ("..." if in_dot_mode else ">>>") + analyzer.before.rstrip())
+#                     in_dot_mode = False
+#             return alines
+#
+#         for l in (head[extra['evaluated_lines']:] + ["\n"]):
+#             analyzer.sendline(l)
+#             analyzer.expect_exact([">>>", "..."])
+#
+#
+#         alines = rsession(analyzer, lines)
+#         extra['evaluated_lines'] += len(head) + len(lines)
+#         lines = alines
+#         return lines, [outf, lines]
+#     try:
+#         a,b,c,_ = block_process(lines, tag="#!i", block_fun=functools.partial(block_fun, output=output, extra=extra))
+#         if extra['python'] is not None:
+#             extra['python'].close()
+#
+#         if len(c)>0:
+#             kvs= { v[0] for v in c}
+#             for outf in kvs:
+#                 out = "\n".join( ["\n".join(v[1]) for v in c if v[0] == outf] )
+#                 out = out.replace("\r", "")
+#
+#                 with open(outf, 'w') as f:
+#                     f.write(out)
+#
+#     except Exception as e:
+#         print("lines are")
+#         print("\n".join(lines))
+#         print("Bad thing in #!i command in file", file)
+#         raise e
+#     return lines
+#
+# def save_s(lines, file, output, include_path_base=None): # save file snips to disk
+#     def block_fun(lines, start_extra, end_extra, art, output, **kwargs):
+#         outf = output + ("_" + art if art is not None and len(art) > 0 else "") + ".py"
+#         lines = full_strip(lines)
+#         return lines, [outf, lines]
+#     try:
+#         a,b,c,_ = block_process(lines, tag="#!s", block_fun=functools.partial(block_fun, output=output))
+#
+#         if len(c)>0:
+#             kvs= { v[0] for v in c}
+#             for outf in kvs:
+#
+#                 out = "\n".join([f"# {include_path_base}"]  + ["\n".join(v[1]) for v in c if v[0] == outf] )
+#
+#                 with open(outf, 'w') as f:
+#                     f.write(out)
+#
+#     except Exception as e:
+#         print("lines are")
+#         print("\n".join(lines))
+#         print("Bad thing in #!s command in file", file)
+#         raise e
+#     return lines
+#
+# def run_o(lines, file, output):
+#     def block_fun(lines, start_extra, end_extra, art, output, **kwargs):
+#         id = indent(lines[0])
+#         outf = output + ("_" + art if art is not None else "") + ".txt"
+#         l2 = []
+#         l2 += [id + "import sys", id + f"sys.stdout = open('{outf}', 'w')"]
+#         l2 += lines
+#         # l2 += [indent(lines[-1]) + "sys.stdout.close()"]
+#         l2 += [indent(lines[-1]) + "sys.stdout = sys.__stdout__"]
+#         return l2, None
+#     try:
+#         lines2, didfind, extra, _ = block_process(lines, tag="#!o", block_fun=functools.partial(block_fun, output=output) )
+#     except Exception as e:
+#         print("Bad file: ", file)
+#         print("I was cutting the #!o tag")
+#         print("\n".join( lines) )
+#         raise(e)
+#
+#     if didfind:
+#         fp, ex = os.path.splitext(file)
+#         file_run = fp + "_RUN_OUTPUT_CAPTURE" +ex
+#         if os.path.exists(file_run):
+#             print("file found mumble...")
+#         else:
+#             with open(file_run, 'w', encoding="utf-8") as f:
+#                 f.write("\n".join(lines2) )
+#             cmd = "python " + file_run
+#
+#             s,ok = execute_command(cmd.split(), shell=True)
+#             print(s)
+#             os.remove(file_run)
+#
+# def fix_f(lines, debug):
+#     lines2 = []
+#     i = 0
+#     while i < len(lines):
+#         l = lines[i]
+#         dx = l.find("#!f")
+#         if dx >= 0:
+#             l_head = l[dx+3:].strip()
+#             l = l[:dx]
+#             lines2.append(l)
+#             id = indent(lines[i+1])
+#             for j in range(i+1, 10000):
+#                 jid = len( indent(lines[j]) )
+#                 if  j+1 == len(lines) or ( jid < len(id) and len(lines[j].strip() ) > 0):
+#                     break
+#
+#             if len(lines[j-1].strip()) == 0:
+#                 j = j - 1
+#             funbody = "\n".join( lines[i+1:j] )
+#             if i == j:
+#                 raise Exception("Empty function body")
+#             i = j
+#             comments, funrem = gcoms(funbody)
+#             comments = [id + c for c in comments]
+#             if len(comments) > 0:
+#                 lines2 += comments[0].split("\n")
+#             lines2 += [id+"#!b"]
+#             lines2 += (id+funrem.strip()).split("\n")
+#             errm = l_head if len(l_head) > 0 else "Implement function body"
+#             lines2 += [f'{id}#!b {errm}']
+#
+#         else:
+#             lines2.append(l)
+#             i += 1
+#     return lines2
+#
+# def fix_b2(lines):
+#     stats = {'n': 0}
+#     def block_fun(lines, start_extra, end_extra, art, stats=None, **kwargs):
+#         id = indent(lines[0])
+#         lines = lines[1:] if len(lines[0].strip()) == 0 else lines
+#         lines = lines[:-1] if len(lines[-1].strip()) == 0 else lines
+#         cc = len(lines)
+#         ee = end_extra.strip()
+#         if len(ee) >= 2 and ee[0] == '"':
+#             ee = ee[1:-1]
+#         start_extra = start_extra.strip()
+#         l2 = ([id+start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.",
+#                                          id+f'raise NotImplementedError("{ee}")']
+#         # if "\n".join(l2).find("l=l")>0:
+#         #     a = 2342342
+#         stats['n'] += cc
+#         return l2, cc
+#     lines2, _, _, cutout = block_process(lines, tag="#!b", block_fun=functools.partial(block_fun, stats=stats))
+#     return lines2, stats['n'], cutout
+#
+#
+# def fix_references(lines, info, strict=True):
+#     for cmd in info['new_references']:
+#         lines = fix_single_reference(lines, cmd, info['new_references'][cmd], strict=strict)
+#     return lines
+#
+# def fix_single_reference(lines, cmd, aux, strict=True):
+#     references = aux
+#     s = "\n".join(lines)
+#     i = 0
+#     while True:
+#         (i, j), reference, txt = find_tex_cite(s, start=i, key=cmd)
+#         if i < 0:
+#             break
+#         if reference not in references:
+#             er = "cref label not found for label: " + reference
+#             if strict:
+#                 raise IndexError(er)
+#             else:
+#                 print(er)
+#                 continue
+#         r = references[reference]
+#         rtxt = r['pyref']
+#         s = s[:i] + rtxt + s[j + 1:]
+#         i = i + len(rtxt)
+#         print(cmd, rtxt)
+#
+#     lines = s.splitlines(keepends=False)
+#     return lines
+#
+#
+# def fix_cite(lines, info, strict=True):
+#     lines = fix_references(lines, info, strict=strict)
+#
+#     s = "\n".join(lines)
+#     i = 0
+#     all_refs = []
+#     while True:
+#         (i, j), reference, txt = find_tex_cite(s, start=i, key="\\cite")
+#         if i < 0:
+#             break
+#         if reference not in info['references']:
+#             raise IndexError("no such reference: " + reference)
+#         ref = info['references'][reference]
+#         label = ref['label']
+#         rtxt = f"({label}" + (", "+txt if txt is not None else "") + ")"
+#         r = ref['plain']
+#         if r not in all_refs:
+#             all_refs.append(r)
+#         s = s[:i] + rtxt + s[j+1:]
+#         i = i + len(rtxt)
+#
+#     cpr = "{{copyright}}"
+#     if not s.startswith(COMMENT):
+#         s = f"{COMMENT}\n{cpr}\n{COMMENT}\n" + s
+#     if len(all_refs) > 0:
+#         i = s.find(COMMENT, s.find(COMMENT)+1)
+#         all_refs = ["  " + r for r in all_refs]
+#         s = s[:i] + "\nReferences:\n" + "\n".join(all_refs) + "\n" + s[i:]
+#
+#     s = s.replace(cpr, info['code_copyright'])
+#     return s
+#
+# def full_strip(lines, tags=None):
+#     if tags is None:
+#         tags = ["#!s", "#!o", "#!f", "#!b"]
+#     for t in tags:
+#         lines = strip_tag(lines, t)
+#     return lines
+#
+# def censor_file(file, info, paths, run_files=True, run_out_dirs=None, cut_files=True, solution_list=None,
+#                 censor_files=True,
+#                 include_path_base=None,
+#                 strict=True):
+#     dbug = False
+#     with open(file, 'r', encoding='utf8') as f:
+#         s = f.read()
+#         s = s.lstrip()
+#         lines = s.split("\n")
+#         for k, l in enumerate(lines):
+#             if l.find(" # !") > 0:
+#                 print(f"{file}:{k}> bad snipper tag, fixing")
+#             lines[k] = l.replace("# !", "#!")
+#
+#         try:
+#             s = fix_cite(lines, info, strict=strict)
+#             lines = s.split("\n")
+#         except IndexError as e:
+#             print(e)
+#             print("Fuckup in file, cite/reference tag not found!>", file)
+#             raise e
+#
+#         if run_files or cut_files:
+#             ofiles = []
+#             for rod in run_out_dirs:
+#                 if not os.path.isdir(rod):
+#                     os.mkdir(rod)
+#                 ofiles.append(os.path.join(rod, os.path.basename(file).split(".")[0]) )
+#             ofiles[0] = ofiles[0].replace("\\", "/")
+#
+#             if run_files:
+#                 run_o(lines, file=file, output=ofiles[0])
+#                 run_i(lines, file=file, output=ofiles[0])
+#             if cut_files:
+#                 save_s(lines, file=file, output=ofiles[0], include_path_base=include_path_base)  # save file snips to disk
+#         lines = full_strip(lines, ["#!s", "#!o", '#!i'])
+#
+#         # lines = fix_c(lines)
+#         if censor_files:
+#             lines = fix_f(lines, dbug)
+#             lines, nB, cut = fix_b2(lines)
+#         else:
+#             nB = 0
+#         lines = fix_r(lines)
+#
+#         if censor_files and len(cut) > 0 and solution_list is not None:
+#             fname = file.__str__()
+#             i = fname.find("irlc")
+#             wk = fname[i+5:fname.find("\\", i+6)]
+#             sp = paths['02450students'] +"/solutions/"
+#             if not os.path.exists(sp):
+#                 os.mkdir(sp)
+#             sp = sp + wk
+#             if not os.path.exists(sp):
+#                 os.mkdir(sp)
+#
+#             stext = ["\n".join(lines) for lines in cut]
+#             for i,sol in enumerate(stext):
+#                 sout = sp + f"/{os.path.basename(fname)[:-3]}_TODO_{i+1}.py"
+#                 wsol = any([True for s in solution_list if os.path.basename(sout).startswith(s)])
+#                 print(sout, "(published)" if wsol else "")
+#                 if wsol:
+#                     with open(sout, "w") as f:
+#                         f.write(sol)
+#
+#         if len(lines[-1])>0:
+#             lines.append("")
+#         s2 = "\n".join(lines)
+#
+#     with open(file, 'w', encoding='utf-8') as f:
+#         f.write(s2)
+#     return nB
+# # lines: 294, 399, 420, 270
\ No newline at end of file
-- 
GitLab