diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5391d87392a9983b4dcac6751022ac8b0c20e424 --- /dev/null +++ b/.gitignore @@ -0,0 +1,138 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ \ No newline at end of file diff --git a/setup.py b/setup.py index ad7161828796200b8e81d49a5bf73e7d9fe55774..a7fc926195803d749a51d95f64d5cd39bb7cc54c 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ with open("README.md", "r", encoding="utf-8") as fh: setuptools.setup( name="codesnipper", - version="0.1.0", + version="0.1.1", author="Tue Herlau", author_email="tuhe@dtu.dk", description="A lightweight framework for censoring student solutions files and extracting code + output", diff --git a/src/snipper/__init__.py b/src/snipper/__init__.py index f102a9cadfa89ce554b3b26d2b90bfba2e05273c..4e820bd71b5043c9aa88372b27090362b28a3900 100644 --- a/src/snipper/__init__.py +++ b/src/snipper/__init__.py @@ -1 +1,3 @@ __version__ = "0.0.1" +from snipper.snip_dir import snip_dir + diff --git a/src/snipper/__pycache__/__init__.cpython-38.pyc b/src/snipper/__pycache__/__init__.cpython-38.pyc index f52a70bcdae56e64584b49d2330bcdd216e21e6d..eae9fb89b94899b9aab163c73358f43efa116212 100644 Binary files a/src/snipper/__pycache__/__init__.cpython-38.pyc and b/src/snipper/__pycache__/__init__.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/block_parsing.cpython-38.pyc b/src/snipper/__pycache__/block_parsing.cpython-38.pyc index 5f0fa38e06964f4ccf08c26117b7b787eb3b0791..f863b8116bd2e1ce5093ddf2a92c11e93ae21392 100644 Binary files a/src/snipper/__pycache__/block_parsing.cpython-38.pyc and b/src/snipper/__pycache__/block_parsing.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/fix_bf.cpython-38.pyc b/src/snipper/__pycache__/fix_bf.cpython-38.pyc index 50edfabfa447d901387da5638cca83d0bdb560f9..08110df11cf8ba047f40b6a36f84bec56cdd98a5 100644 Binary files a/src/snipper/__pycache__/fix_bf.cpython-38.pyc and b/src/snipper/__pycache__/fix_bf.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/fix_cite.cpython-38.pyc b/src/snipper/__pycache__/fix_cite.cpython-38.pyc index 8182783048657d4e03b7fdc09bc39298fccc3065..41ff88198ed4802e312744218929d4693baa9cdc 100644 Binary files a/src/snipper/__pycache__/fix_cite.cpython-38.pyc and b/src/snipper/__pycache__/fix_cite.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/fix_i.cpython-38.pyc b/src/snipper/__pycache__/fix_i.cpython-38.pyc index b538f528512b367e6f1e99eaf64f3f4e0c4363b5..1f1675e90766d6fb6d166d74c20670272061e74b 100644 Binary files a/src/snipper/__pycache__/fix_i.cpython-38.pyc and b/src/snipper/__pycache__/fix_i.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/fix_s.cpython-38.pyc b/src/snipper/__pycache__/fix_s.cpython-38.pyc index 82d5e8e8b679eff28ef31891521b5b451b77d499..9ee4629819d4fb80d652bde8b91cbd64e3ec86af 100644 Binary files a/src/snipper/__pycache__/fix_s.cpython-38.pyc and b/src/snipper/__pycache__/fix_s.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/load_citations.cpython-38.pyc b/src/snipper/__pycache__/load_citations.cpython-38.pyc index e79c0857a12b2136f7aa6dcb235a3dc9c4293688..d56371f0185a8d8fca7bfd94be7ecc44f1d2f380 100644 Binary files 
a/src/snipper/__pycache__/load_citations.cpython-38.pyc and b/src/snipper/__pycache__/load_citations.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/snip_dir.cpython-38.pyc b/src/snipper/__pycache__/snip_dir.cpython-38.pyc index a8205b9d82839835b55f286b4d28b2c5c8467e71..4ce3da3636250429478c87ecb3bafc063b86f0dc 100644 Binary files a/src/snipper/__pycache__/snip_dir.cpython-38.pyc and b/src/snipper/__pycache__/snip_dir.cpython-38.pyc differ diff --git a/src/snipper/__pycache__/snipper_main.cpython-38.pyc b/src/snipper/__pycache__/snipper_main.cpython-38.pyc index fd90def87e043144655478d490c81d7f34538bfe..44240208f1eec24853d05c9805203b70e188365e 100644 Binary files a/src/snipper/__pycache__/snipper_main.cpython-38.pyc and b/src/snipper/__pycache__/snipper_main.cpython-38.pyc differ diff --git a/src/snipper/block_parsing.py b/src/snipper/block_parsing.py index cfad9c429c6c38c10c7ecbd615831ad3b80abece..23d4271adbc88234cab2988ae60c4568e2472126 100644 --- a/src/snipper/block_parsing.py +++ b/src/snipper/block_parsing.py @@ -20,31 +20,41 @@ def block_iterate(lines, tag): yield contents +def block_join(contents): + blk = contents['block'] + if len(blk) == 1: + blk = [blk[0] + contents['post1'] + " " + contents['post2']] + elif len(blk) > 1: + blk = [blk[0] + contents['post1']] + blk[1:-1] + [blk[-1] + contents['post2']] + return contents['first'] + blk + contents['last'] + def block_split(lines, tag): stag = tag[:2] # Start of any next tag. - def join(contents): - return contents['first'] + [contents['block'][0] + contents['post1']] + contents['block'][1:-1] \ - + [contents['block'][-1] + contents['post2']] + contents['last'] + contents = {} i, j = f2(lines, tag) def get_tag_args(line): # line = line.strip() k = line.find(" ") - tag_args = ((line[:k + 1] if k >= 0 else line)[len(tag):] ).strip() + tag_args = ((line[:k + 1] if k >= 0 else line)[len(tag):] ).strip().split(";") + tag_args = [t.strip() for t in tag_args] + # if len(tag_args) == 0: + # return {'': ''} # No name. + tag_args = [t for t in tag_args if len(t) > 0] + tag_args = dict([t.split("=") if "=" in t else (t.lower().strip(), True) for t in tag_args]) - if len(tag_args) == 0: - return {'': ''} # No name. 
+ if '' not in tag_args: + tag_args[''] = '' - tag_args = dict([t.split("=") for t in tag_args.split(";")]) return tag_args if i is None: return None else: - print( lines[i] ) + # print( lines[i] ) start_tag_args = get_tag_args(lines[i][j:]) START_TAG = f"{tag}={start_tag_args['']}" if start_tag_args[''] != '' else tag @@ -62,7 +72,7 @@ def block_split(lines, tag): l2 = lines[:i] + [lines[i][:j2], lines[i][j2:]] + lines[i2+1:] c2 = block_split(l2, tag=tag) c2['block'].pop() - c2['joined'] = join(c2) + c2['joined'] = block_join(c2) return c2 else: contents['first'] = lines[:i] @@ -81,7 +91,7 @@ def block_split(lines, tag): contents['arg2'], contents['post2'] = argpost(lines[i2], j2) blk = [lines[i][:j]] + lines[i+1:i2] + [lines[i2][:j2]] contents['block'] = blk - contents['joined'] = join(contents) + contents['joined'] = block_join(contents) contents['start_tag_args'] = start_tag_args contents['name'] = start_tag_args[''] return contents diff --git a/src/snipper/fix_bf.py b/src/snipper/fix_bf.py index 3a128bc9b5bda6057ca8643c13f09f4b4f3b4950..c4d5254adb5e889060e50488e132291397c82700 100644 --- a/src/snipper/fix_bf.py +++ b/src/snipper/fix_bf.py @@ -1,6 +1,7 @@ import functools -from snipper.legacy import gcoms, block_process +from snipper.legacy import gcoms from snipper.block_parsing import indent +from snipper.block_parsing import block_split, block_join def fix_f(lines, debug): @@ -29,39 +30,53 @@ def fix_f(lines, debug): comments = [id + c for c in comments] if len(comments) > 0: lines2 += comments[0].split("\n") - # lines2 += [id+"#!b"] f = [id + l.strip() for l in funrem.splitlines()] f[0] = f[0] + "#!b" - - # lines2 += (id+funrem.strip()).split("\n") errm = l_head if len(l_head) > 0 else "Implement function body" f[-1] = f[-1] + f' #!b {errm}' lines2 += f - # lines2 += [f'{id}#!b {errm}'] - else: lines2.append(l) i += 1 return lines2 +# stats = {'n': 0} +def _block_fun(lines, start_extra, end_extra, keep=False, silent=False): + id = indent(lines[0]) + lines = lines[1:] if len(lines[0].strip()) == 0 else lines + lines = lines[:-1] if len(lines[-1].strip()) == 0 else lines + cc = len(lines) + ee = end_extra.strip() + if len(ee) >= 2 and ee[0] == '"': + ee = ee[1:-1] + start_extra = start_extra.strip() + if keep: + l2 = ['GARBAGE'] * cc + else: + if silent: + l2 = [] + cc = 0 + else: + l2 = ([id + start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.", + id + f'raise NotImplementedError("{ee}")'] + + # stats['n'] += cc + return l2, cc + + def fix_b2(lines, keep=False): - stats = {'n': 0} - def block_fun(lines, start_extra, end_extra, art, stats=None, **kwargs): - id = indent(lines[0]) - lines = lines[1:] if len(lines[0].strip()) == 0 else lines - lines = lines[:-1] if len(lines[-1].strip()) == 0 else lines - cc = len(lines) - ee = end_extra.strip() - if len(ee) >= 2 and ee[0] == '"': - ee = ee[1:-1] - start_extra = start_extra.strip() - if keep: - l2 = ['GARBAGE'] * cc - else: - l2 = ([id+start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.", id+f'raise NotImplementedError("{ee}")'] + cutout = [] + n = 0 + while True: + b = block_split(lines, tag="#!b") + if b == None: + break + args = {k:v for k, v in b['start_tag_args'].items() if len(k) > 0} + cutout += b['block'] + b['block'], dn = _block_fun(b['block'], start_extra=b['arg1'], end_extra=b['arg2'], **args, keep=keep) + lines = block_join(b) + n += dn - stats['n'] += cc - return l2, cc - lines2, _, _, cutout = block_process(lines, tag="#!b", 
block_fun=functools.partial(block_fun, stats=stats)) - return lines2, stats['n'], cutout \ No newline at end of file + # lines2, _, _, cutout = block_process(lines, tag="#!b", block_fun=functools.partial(block_fun, stats=stats)) + return lines, n, cutout \ No newline at end of file diff --git a/src/snipper/fix_cite.py b/src/snipper/fix_cite.py index c4cfb1c07a3ba881d864557c94d76120bc69ea6a..4b49641529051467b8ddeeaf91eb66b82d344465 100644 --- a/src/snipper/fix_cite.py +++ b/src/snipper/fix_cite.py @@ -19,7 +19,7 @@ def fix_aux_special(lines, aux, command='\\nref', output='\cite[%s]{my_bibtex_en def fix_aux(lines, aux, strict=True): l2 = fix_single_reference(lines, aux=aux, cmd="\\ref", strict=True) - print("\n".join(l2)) + # print("\n".join(l2)) return l2 def fix_bibtex(lines, bibtex): diff --git a/src/snipper/fix_i.py b/src/snipper/fix_i.py index 0d17a39c250627d738a3544f62fc9cc52b8c0257..28b5bfbe733929b4083e490cdae58d86d511caf9 100644 --- a/src/snipper/fix_i.py +++ b/src/snipper/fix_i.py @@ -2,6 +2,11 @@ import functools import textwrap from snipper.legacy import block_process from snipper.block_parsing import full_strip +import os +if os.name == 'nt': + import wexpect as we +else: + import pexpect as we def run_i(lines, file, output): @@ -14,11 +19,6 @@ def run_i(lines, file, output): lines = textwrap.dedent(s).strip().splitlines() if extra['python'] is None: - import os - if os.name == 'nt': - import wexpect as we - else: - import pexpect as we an = we.spawn("python", encoding="utf-8", timeout=20) an.expect([">>>"]) extra['python'] = an @@ -34,8 +34,6 @@ def run_i(lines, file, output): lines = l2 alines = [] - - # indented = False in_dot_mode = False if len(lines[-1]) > 0 and (lines[-1].startswith(" ") or lines[-1].startswith("\t")): lines += [""] @@ -57,14 +55,8 @@ def run_i(lines, file, output): if 'help(s.find)' in word: pass if dotmode: - # alines.append("..." + word) alines.append(">>>" + analyzer.before.rstrip() if not in_dot_mode else "..." + analyzer.before.rstrip()) in_dot_mode = True - # if i < len(lines) - 1 and not lines[i + 1].startswith(" "): - # analyzer.sendline("\n") # going out of indentation mode . - # analyzer.expect_exact([">>>", "..."]) - # alines.append("..." + analyzer.after.rstrip()) - # pass else: alines.append( ("..." if in_dot_mode else ">>>") + analyzer.before.rstrip()) in_dot_mode = False diff --git a/src/snipper/fix_s.py b/src/snipper/fix_s.py index c74028012c46aee0d4e8ab1203393f4ca0dab887..5a97096a6321ddab15efa6cd46f900777476b92a 100644 --- a/src/snipper/fix_s.py +++ b/src/snipper/fix_s.py @@ -1,26 +1,47 @@ from collections import defaultdict import os from snipper.block_parsing import block_iterate +from snipper.snipper_main import full_strip def get_s(lines): """ Return snips from 'lines' """ blocks = defaultdict(list) for c in block_iterate(lines, "#!s"): + # c['start_tag_args'] + if not c['start_tag_args'].get('keeptags', False): + c['block'] = full_strip(c['block']) + else: + # In this case the #! tags are kept in. + pass + # print("keepting tags.") blocks[c['name']].append(c) output = {} for name, co in blocks.items(): - output[name] = [l for c in co for l in c['block']] + slines = [l for c in co for l in c['block']] + # full_strip("") + # c['block']['args'] + # slines = slines[ 23] + # co. + output[name] = slines return output +# def _s_block_process(): +# +# pass + def save_s(lines, output_dir, file_path): # save file snips to disk content = get_s(lines) - if not os.path.isdir(output_dir): + # Only make output dir if needed. 
+ if len(content) > 0 and not os.path.isdir(output_dir): os.mkdir(output_dir) + for name, ll in content.items(): if file_path is not None: + file_path = file_path.replace("\\", "/") ll = [f"# {file_path}"] + ll out = "\n".join(ll) + with open(output_dir + "/" + os.path.basename(file_path)[:-3] + ("_" + name if len(name) > 0 else name) + ".py", 'w') as f: f.write(out) diff --git a/src/snipper/load_citations.py b/src/snipper/load_citations.py index 786c02b65d00d7291a1831f5063e9bdc179170e8..0512cd1b00a0c662d2f6188f457cd97db9c9901a 100644 --- a/src/snipper/load_citations.py +++ b/src/snipper/load_citations.py @@ -1,18 +1,17 @@ import os import io -# from coursebox.core.info_paths import get_paths from pybtex import plugin from pybtex.database.input import bibtex +from warnings import warn ### Newstyle loading. - def get_aux(auxfile): # paths = get_paths() # auxfile = os.path.join(paths['02450public'], auxfile) if not os.path.exists(auxfile): - print(auxfile) - from warnings import warn - warn("Could not find file") + # print(auxfile) + + warn("Could not find bibtex file: "+ auxfile) return {} with open(auxfile, 'r') as f: @@ -104,17 +103,7 @@ def get_bibtex(bibfile): 'filename': url, } - # newref = {} - # ls = lambda x: x if isinstance(x, list) else [x] - # if 'tex_command' in gi: - # for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ): - # ax = parse_aux(aux, bibtex=gi['bibtex']) - # for k in ax: - # ax[k]['pyref'] = display%(ax[k]['nicelabel'],) - # newref[cmd] = ax - - return refs#, newref - + return refs def find_tex_cite(s, start=0, key="\\cite"): @@ -132,112 +121,112 @@ def find_tex_cite(s, start=0, key="\\cite"): return (i, j), reference, txt ### Oldstyle loading -def get_references(bibfile, gi): - """ - all references. 
- """ - if not os.path.exists(bibfile): - return None - - pybtex_style = plugin.find_plugin('pybtex.style.formatting', 'alpha')() - pybtex_html_backend = plugin.find_plugin('pybtex.backends', 'html')() - pybtex_plain_backend = plugin.find_plugin('pybtex.backends', 'plaintext')() - pybtex_parser = bibtex.Parser() - - with open(bibfile, 'r', encoding='utf8') as f: - data = pybtex_parser.parse_stream(f) - - data_formatted = pybtex_style.format_entries(data.entries.values()) - refs = {} - - if 'auxfile' in gi: - all_references = parse_aux(gi['auxfile'], bibtex=gi['bibtex']) - else: - all_references = {} - - for entry in data_formatted: - output = io.StringIO() - output_plain = io.StringIO() - pybtex_plain_backend.output = output_plain.write - pybtex_html_backend.output = output.write - pybtex_html_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_html_backend)) - - pybtex_plain_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_plain_backend)) - - html = output.getvalue() - plain = output_plain.getvalue() - - entry.text.parts[-2].__str__() - url = "" - for i,p in enumerate(entry.text.parts): - if "\\url" in p.__str__(): - url = entry.text.parts[i+1] - break - url = url.__str__() - i1 = html.find("\\textbf") - i2 = html.find("</span>", i1) - dht = html[i1:i2] - dht = dht[dht.find(">")+1:] - html = html[:i1] + " <b>"+dht+"</b> " + html[i2+7:] - - plain = plain.replace("\\textbf ", "") - iu = plain.find("URL") - if iu > 0: - plain = plain[:iu] - - refs[entry.key] = {'html': html, - 'plain': plain, - 'label': entry.label, - 'filename': url, - 'references': all_references} - - newref = {} - ls = lambda x: x if isinstance(x, list) else [x] - if 'tex_command' in gi: - for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ): - ax = parse_aux(aux, bibtex=gi['bibtex']) - for k in ax: - ax[k]['pyref'] = display%(ax[k]['nicelabel'],) - newref[cmd] = ax - - return refs, newref - - -def parse_aux(auxfile, bibtex=None): - # paths = get_paths() - paths = {} - auxfile = os.path.join(paths['02450public'], auxfile) - if not os.path.exists(auxfile): - print(auxfile) - from warnings import warn - warn("Could not find file") - return {} - - with open(auxfile, 'r') as f: - items = f.readlines() - entries = {} - for e in items: - e = e.strip() - if e.startswith("\\newlabel") and "@cref" in e: - # print(e) - i0 = e.find("{") - i1 = e.find("@cref}") - key = e[i0+1:i1] - - j0 = e.find("{{[", i0)+3 - j1 = e.find("}", j0) - - val = e[j0:j1] - - label = val[:val.find("]")] - number = val[val.rfind("]")+1:] - - if label == "equation": - nlabel = f"eq. ({number})" - else: - nlabel = label.capitalize() + " " + number - - coderef = "\\cite[%s]{%s}"%(nlabel, bibtex) if bibtex is not None else None - entries[key] = {'pyref': coderef, 'nicelabel': nlabel, 'rawlabel': label, 'number': number} - return entries +# def get_references(bibfile, gi): +# """ +# all references. 
+# """ +# if not os.path.exists(bibfile): +# return None +# +# pybtex_style = plugin.find_plugin('pybtex.style.formatting', 'alpha')() +# pybtex_html_backend = plugin.find_plugin('pybtex.backends', 'html')() +# pybtex_plain_backend = plugin.find_plugin('pybtex.backends', 'plaintext')() +# pybtex_parser = bibtex.Parser() +# +# with open(bibfile, 'r', encoding='utf8') as f: +# data = pybtex_parser.parse_stream(f) +# +# data_formatted = pybtex_style.format_entries(data.entries.values()) +# refs = {} +# +# if 'auxfile' in gi: +# all_references = parse_aux(gi['auxfile'], bibtex=gi['bibtex']) +# else: +# all_references = {} +# +# for entry in data_formatted: +# output = io.StringIO() +# output_plain = io.StringIO() +# pybtex_plain_backend.output = output_plain.write +# pybtex_html_backend.output = output.write +# pybtex_html_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_html_backend)) +# +# pybtex_plain_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_plain_backend)) +# +# html = output.getvalue() +# plain = output_plain.getvalue() +# +# entry.text.parts[-2].__str__() +# url = "" +# for i,p in enumerate(entry.text.parts): +# if "\\url" in p.__str__(): +# url = entry.text.parts[i+1] +# break +# url = url.__str__() +# i1 = html.find("\\textbf") +# i2 = html.find("</span>", i1) +# dht = html[i1:i2] +# dht = dht[dht.find(">")+1:] +# html = html[:i1] + " <b>"+dht+"</b> " + html[i2+7:] +# +# plain = plain.replace("\\textbf ", "") +# iu = plain.find("URL") +# if iu > 0: +# plain = plain[:iu] +# +# refs[entry.key] = {'html': html, +# 'plain': plain, +# 'label': entry.label, +# 'filename': url, +# 'references': all_references} +# +# newref = {} +# ls = lambda x: x if isinstance(x, list) else [x] +# if 'tex_command' in gi: +# for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ): +# ax = parse_aux(aux, bibtex=gi['bibtex']) +# for k in ax: +# ax[k]['pyref'] = display%(ax[k]['nicelabel'],) +# newref[cmd] = ax +# +# return refs, newref +# +# +# def parse_aux(auxfile, bibtex=None): +# # paths = get_paths() +# paths = {} +# auxfile = os.path.join(paths['02450public'], auxfile) +# if not os.path.exists(auxfile): +# print(auxfile) +# from warnings import warn +# warn("Could not find file") +# return {} +# +# with open(auxfile, 'r') as f: +# items = f.readlines() +# entries = {} +# for e in items: +# e = e.strip() +# if e.startswith("\\newlabel") and "@cref" in e: +# # print(e) +# i0 = e.find("{") +# i1 = e.find("@cref}") +# key = e[i0+1:i1] +# +# j0 = e.find("{{[", i0)+3 +# j1 = e.find("}", j0) +# +# val = e[j0:j1] +# +# label = val[:val.find("]")] +# number = val[val.rfind("]")+1:] +# +# if label == "equation": +# nlabel = f"eq. 
({number})" +# else: +# nlabel = label.capitalize() + " " + number +# +# coderef = "\\cite[%s]{%s}"%(nlabel, bibtex) if bibtex is not None else None +# entries[key] = {'pyref': coderef, 'nicelabel': nlabel, 'rawlabel': label, 'number': number} +# return entries diff --git a/src/snipper/snip_dir.py b/src/snipper/snip_dir.py index 328e93b3cb6cda31aea20f8460f9ce08f6a25c93..f264c59b152f3e228b20f8462ef604c881e69f3d 100644 --- a/src/snipper/snip_dir.py +++ b/src/snipper/snip_dir.py @@ -3,13 +3,21 @@ from snipper.snipper_main import censor_file from pathlib import Path import time import fnmatch - +import tempfile def snip_dir(source_dir, # Sources - dest_dir, # Will write to this directory + dest_dir=None, # Will write to this directory output_dir=None, # Where snippets are going to be stored references=None, # Reference database - exclude=None, clean_destination_dir=True): + exclude=None, clean_destination_dir=True, + run_files=True, # Run #!o tags and #!i tags + cut_files=True, # censor files. + license_head=None, + ): + + if dest_dir == None: + dest_dir = tempfile.mkdtemp() + print("[snipper]", "no destination dir was specified so using nonsense destination:", dest_dir) if references == None: references = dict(aux=None, bibtex=None, commands=[]) @@ -17,14 +25,16 @@ def snip_dir(source_dir, # Sources if exclude == None: exclude = [] + exclude += ["*__pycache__*"] # Just...no. if not os.path.exists(dest_dir): os.makedirs(dest_dir) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - output_dir = os.path.abspath(output_dir) source_dir = os.path.abspath(source_dir) dest_dir = os.path.abspath(dest_dir) + if output_dir == None: + output_dir = os.path.dirname(source_dir) + "/output" + + output_dir = os.path.abspath(output_dir) if os.path.samefile( source_dir, dest_dir): raise Exception("Source and destination is the same") @@ -33,15 +43,14 @@ def snip_dir(source_dir, # Sources os.makedirs(dest_dir) out = dest_dir - hw = {'base': source_dir, - 'exclusion': exclude} + hw = {'base': source_dir} print(f"[snipper] Synchronizing directories: {hw['base']} -> {out}") if os.path.exists(dest_dir): shutil.rmtree(dest_dir) shutil.copytree(source_dir, dest_dir) - time.sleep(0.2) + time.sleep(0.1) ls = list(Path(dest_dir).glob('**/*.*')) acceptable = [] @@ -50,6 +59,10 @@ def snip_dir(source_dir, # Sources m = [fnmatch.fnmatch(split, ex) for ex in exclude] acceptable.append( (l, not any(m) )) + # for f,ac in acceptable: + # if not ac: + # print(f) + # print(acceptable) # now we have acceptable files in list. # run_out_dirs = ["./output"] @@ -58,7 +71,7 @@ def snip_dir(source_dir, # Sources # edirs = {os.path.normpath(os.path.dirname(f_) if not os.path.isdir(f_) else f_) for f_ in edirs} # edirs.remove(os.path.normpath(out)) for f, accept in acceptable: - if os.path.isdir(f) or not str(f).endswith(".py"): # We only touch .py files. + if os.path.isdir(f) or not str(f).endswith(".py") or str(f).endswith("_grade.py"): # We only touch .py files. continue # f_dir = os.path.normpath(f if os.path.isdir(f) else os.path.dirname(f)) if accept: @@ -70,15 +83,18 @@ def snip_dir(source_dir, # Sources # if "assignments" in str(f) and "_grade.py" in str(f): # continue - info = {'new_references': [], 'code_copyright': 'Example student code. This file is automatically generated from the files in the instructor-directory'} + # info = {'new_references': [], 'code_copyright': 'Example student code. 
This file is automatically generated from the files in the instructor-directory'} # paths = {} solution_list = [] kwargs = {} - cut_files = True - run_files = True + # cut_files = True + # copyright() + + # run_files = True nrem = censor_file(f, run_files=run_files, run_out_dirs=output_dir, cut_files=cut_files, solution_list=solution_list, base_path=dest_dir, references=references, + license_head=license_head, **kwargs) if nrem > 0: print(f"{nrem}> {f}") diff --git a/src/snipper/snipper_main.py b/src/snipper/snipper_main.py index 4b9e7da801d544266fa1d473558daf5bd3d1ddad..b2f5803f5f52ee03e15a652fa91ad0df6e964a2c 100644 --- a/src/snipper/snipper_main.py +++ b/src/snipper/snipper_main.py @@ -33,7 +33,8 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio censor_files=True, base_path=None, strict=True, - references=None): + references=None, + license_head=None): if references == None: references = {} @@ -55,7 +56,7 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio print("Error in file, cite/reference tag not found!>", file) raise e - if run_files or cut_files: + if (run_files or cut_files) and run_out_dirs is not None: ofiles = [] for rod in [run_out_dirs]: ofiles.append(os.path.join(rod, os.path.basename(file).split(".")[0]) ) @@ -96,12 +97,20 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio # with open(sout, "w") as f: # f.write(sol) - if len(lines[-1])>0: + if len(lines) > 0 and len(lines[-1])>0: lines.append("") s2 = "\n".join(lines) + if license_head is not None: + s2 = fix_copyright(s2, license_head) + + with open(file, 'w', encoding='utf-8') as f: f.write(s2) return nB -# lines: 294, 399, 420, 270 + +def fix_copyright(s, license_head): + return "\n".join( ["# " + l.strip() for l in license_head.splitlines()] ) +"\n" + s + +# lines: 294, 399, 420, 116 \ No newline at end of file
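
The reworked `get_tag_args` inside `block_split` now accepts `;`-separated options after a tag (for example `#!s=name;keeptags`), returning a dict whose `''` key holds the block name and whose bare options map to `True`. The snippet below is a minimal standalone sketch of that parsing rule; `parse_tag_args` is an illustrative name, not part of the snipper API.

```python
def parse_tag_args(line, tag="#!s"):
    """Parse '#!s=name;opt;key=val ...' into a dict; '' holds the block name."""
    k = line.find(" ")
    head = (line[:k + 1] if k >= 0 else line)[len(tag):]   # text between the tag and the first space
    parts = [t.strip() for t in head.strip().split(";") if t.strip()]
    args = dict(t.split("=") if "=" in t else (t.lower(), True) for t in parts)
    args.setdefault('', '')                                 # '' is the (possibly empty) block name
    return args

print(parse_tag_args("#!s=grad;keeptags  y = x**2"))  # {'': 'grad', 'keeptags': True}
print(parse_tag_args("#!s"))                          # {'': ''}
```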
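`fix_b2` now censors `#!b ... #!b "message"` blocks by looping over `block_split`/`block_join` instead of the legacy `block_process`, with `_block_fun` replacing each block body by a TODO count plus a `raise NotImplementedError(...)`. A hedged usage sketch follows; the function body is made up for illustration and the printed result is read off the hunks above, so treat it as approximate.

```python
from snipper.fix_bf import fix_b2

src = """def fit(x, y):
    #!b
    w = (x * y).sum() / (x * x).sum()
    #!b "Compute the least-squares slope w"
    return w"""

lines, n_censored, cutout = fix_b2(src.splitlines(), keep=False)
print("\n".join(lines))
# Roughly:
#   def fit(x, y):
#       # TODO: 1 lines missing.
#       raise NotImplementedError("Compute the least-squares slope w")
#       return w
print(n_censored)  # number of solution lines removed (here 1)
print(cutout)      # the removed solution lines
```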
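`get_s` now honours a `keeptags` option on `#!s` blocks: without it the collected snippet is passed through `full_strip` (which removes `#!` directives inside the block); with `#!s=name;keeptags` the block is kept verbatim. A tentative sketch of reading a named snip, with the caveat that the exact output depends on `full_strip`, which is not part of this diff:

```python
from snipper.fix_s import get_s

lines = ["#!s=intro", "x = 1", "print(x)", "#!s=intro"]
print(get_s(lines)["intro"])   # roughly the lines between the two #!s=intro tags
```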
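`snip_dir` gained defaults and pass-through options: `dest_dir=None` falls back to a temporary directory, `output_dir=None` defaults to an `output/` directory next to `source_dir`, `*__pycache__*` and `*_grade.py` files are always skipped, and `license_head` is forwarded to `censor_file`. A hedged call sketch; the paths are placeholders, not files from this repository.

```python
from snipper import snip_dir   # re-exported by src/snipper/__init__.py in this change

snip_dir(
    source_dir="examples/instructor",   # placeholder source tree
    dest_dir=None,                      # None -> a fresh temporary directory
    output_dir=None,                    # None -> '<parent of source_dir>/output'
    exclude=["*.ipynb"],                # '*__pycache__*' is appended automatically
    run_files=True,                     # run #!o and #!i tags
    cut_files=True,                     # censor solution blocks in the files
    license_head="Example course material.\n(c) Course staff.",
)
```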
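When `license_head` is given, `censor_file` prepends it via the new `fix_copyright` helper, which turns each license line into a `# ` comment ahead of the file contents. A minimal check of that behaviour:

```python
from snipper.snipper_main import fix_copyright

print(fix_copyright("print(1)\n", license_head="Example course material.\n(c) Course staff."))
# # Example course material.
# # (c) Course staff.
# print(1)
```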