Skip to content
Snippets Groups Projects
Commit 91986252 authored by tuhe's avatar tuhe
Browse files

Snipper compatibility work with 02465

parent a70300f5
No related branches found
No related tags found
No related merge requests found
Showing
with 384 additions and 192 deletions
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
\ No newline at end of file
......@@ -14,7 +14,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
setuptools.setup(
name="codesnipper",
version="0.1.0",
version="0.1.1",
author="Tue Herlau",
author_email="tuhe@dtu.dk",
description="A lightweight framework for censoring student solutions files and extracting code + output",
......
__version__ = "0.0.1"
from snipper.snip_dir import snip_dir
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
......@@ -20,31 +20,41 @@ def block_iterate(lines, tag):
yield contents
def block_join(contents):
    """Reassemble a parsed block dict back into a flat list of lines.

    ``contents`` is the dict produced by ``block_split``: the lines before the
    block ('first'), the block body ('block'), the text trailing the start/end
    tags ('post1'/'post2') and the lines after the block ('last').
    """
    body = contents['block']
    size = len(body)
    if size == 1:
        # Start and end tag sit on the same line: glue both trailers onto it.
        merged = [f"{body[0]}{contents['post1']} {contents['post2']}"]
    elif size > 1:
        head, *middle, tail = body
        merged = [head + contents['post1'], *middle, tail + contents['post2']]
    else:
        merged = body
    return contents['first'] + merged + contents['last']
def block_split(lines, tag):
stag = tag[:2] # Start of any next tag.
def join(contents):
return contents['first'] + [contents['block'][0] + contents['post1']] + contents['block'][1:-1] \
+ [contents['block'][-1] + contents['post2']] + contents['last']
contents = {}
i, j = f2(lines, tag)
def get_tag_args(line):
# line = line.strip()
k = line.find(" ")
tag_args = ((line[:k + 1] if k >= 0 else line)[len(tag):] ).strip()
tag_args = ((line[:k + 1] if k >= 0 else line)[len(tag):] ).strip().split(";")
tag_args = [t.strip() for t in tag_args]
# if len(tag_args) == 0:
# return {'': ''} # No name.
tag_args = [t for t in tag_args if len(t) > 0]
tag_args = dict([t.split("=") if "=" in t else (t.lower().strip(), True) for t in tag_args])
if len(tag_args) == 0:
return {'': ''} # No name.
if '' not in tag_args:
tag_args[''] = ''
tag_args = dict([t.split("=") for t in tag_args.split(";")])
return tag_args
if i is None:
return None
else:
print( lines[i] )
# print( lines[i] )
start_tag_args = get_tag_args(lines[i][j:])
START_TAG = f"{tag}={start_tag_args['']}" if start_tag_args[''] != '' else tag
......@@ -62,7 +72,7 @@ def block_split(lines, tag):
l2 = lines[:i] + [lines[i][:j2], lines[i][j2:]] + lines[i2+1:]
c2 = block_split(l2, tag=tag)
c2['block'].pop()
c2['joined'] = join(c2)
c2['joined'] = block_join(c2)
return c2
else:
contents['first'] = lines[:i]
......@@ -81,7 +91,7 @@ def block_split(lines, tag):
contents['arg2'], contents['post2'] = argpost(lines[i2], j2)
blk = [lines[i][:j]] + lines[i+1:i2] + [lines[i2][:j2]]
contents['block'] = blk
contents['joined'] = join(contents)
contents['joined'] = block_join(contents)
contents['start_tag_args'] = start_tag_args
contents['name'] = start_tag_args['']
return contents
......
import functools
from snipper.legacy import gcoms, block_process
from snipper.legacy import gcoms
from snipper.block_parsing import indent
from snipper.block_parsing import block_split, block_join
def fix_f(lines, debug):
......@@ -29,25 +30,18 @@ def fix_f(lines, debug):
comments = [id + c for c in comments]
if len(comments) > 0:
lines2 += comments[0].split("\n")
# lines2 += [id+"#!b"]
f = [id + l.strip() for l in funrem.splitlines()]
f[0] = f[0] + "#!b"
# lines2 += (id+funrem.strip()).split("\n")
errm = l_head if len(l_head) > 0 else "Implement function body"
f[-1] = f[-1] + f' #!b {errm}'
lines2 += f
# lines2 += [f'{id}#!b {errm}']
else:
lines2.append(l)
i += 1
return lines2
def fix_b2(lines, keep=False):
stats = {'n': 0}
def block_fun(lines, start_extra, end_extra, art, stats=None, **kwargs):
# stats = {'n': 0}
def _block_fun(lines, start_extra, end_extra, keep=False, silent=False):
id = indent(lines[0])
lines = lines[1:] if len(lines[0].strip()) == 0 else lines
lines = lines[:-1] if len(lines[-1].strip()) == 0 else lines
......@@ -59,9 +53,30 @@ def fix_b2(lines, keep=False):
if keep:
l2 = ['GARBAGE'] * cc
else:
l2 = ([id+start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.", id+f'raise NotImplementedError("{ee}")']
if silent:
l2 = []
cc = 0
else:
l2 = ([id + start_extra] if len(start_extra) > 0 else []) + [id + f"# TODO: {cc} lines missing.",
id + f'raise NotImplementedError("{ee}")']
stats['n'] += cc
# stats['n'] += cc
return l2, cc
lines2, _, _, cutout = block_process(lines, tag="#!b", block_fun=functools.partial(block_fun, stats=stats))
return lines2, stats['n'], cutout
\ No newline at end of file
def fix_b2(lines, keep=False):
    """Censor all ``#!b ... #!b`` blocks in *lines*.

    Repeatedly splits out the next ``#!b`` block, replaces its body via
    ``_block_fun`` (or keeps it when ``keep=True``) and rejoins the lines.

    Returns a tuple ``(lines, n, cutout)`` where ``n`` is the total number of
    censored lines and ``cutout`` is the list of removed source lines.
    """
    cutout = []
    n = 0
    while True:
        b = block_split(lines, tag="#!b")
        if b is None:  # No further #!b blocks.
            break
        # Named arguments from the start tag (e.g. silent=...); the unnamed
        # entry '' is the block name and is not an argument to _block_fun.
        args = {k: v for k, v in b['start_tag_args'].items() if len(k) > 0}
        cutout += b['block']
        b['block'], dn = _block_fun(b['block'], start_extra=b['arg1'], end_extra=b['arg2'], **args, keep=keep)
        lines = block_join(b)
        n += dn
    return lines, n, cutout
\ No newline at end of file
......@@ -19,7 +19,7 @@ def fix_aux_special(lines, aux, command='\\nref', output='\cite[%s]{my_bibtex_en
def fix_aux(lines, aux, strict=True):
    """Resolve ``\\ref`` citations in *lines* against the aux database *aux*.

    ``strict`` is forwarded to ``fix_single_reference`` (previously it was
    silently ignored and ``True`` was hard-coded in the call).
    """
    l2 = fix_single_reference(lines, aux=aux, cmd="\\ref", strict=strict)
    return l2
def fix_bibtex(lines, bibtex):
......
......@@ -2,6 +2,11 @@ import functools
import textwrap
from snipper.legacy import block_process
from snipper.block_parsing import full_strip
import os
if os.name == 'nt':
import wexpect as we
else:
import pexpect as we
def run_i(lines, file, output):
......@@ -14,11 +19,6 @@ def run_i(lines, file, output):
lines = textwrap.dedent(s).strip().splitlines()
if extra['python'] is None:
import os
if os.name == 'nt':
import wexpect as we
else:
import pexpect as we
an = we.spawn("python", encoding="utf-8", timeout=20)
an.expect([">>>"])
extra['python'] = an
......@@ -34,8 +34,6 @@ def run_i(lines, file, output):
lines = l2
alines = []
# indented = False
in_dot_mode = False
if len(lines[-1]) > 0 and (lines[-1].startswith(" ") or lines[-1].startswith("\t")):
lines += [""]
......@@ -57,14 +55,8 @@ def run_i(lines, file, output):
if 'help(s.find)' in word:
pass
if dotmode:
# alines.append("..." + word)
alines.append(">>>" + analyzer.before.rstrip() if not in_dot_mode else "..." + analyzer.before.rstrip())
in_dot_mode = True
# if i < len(lines) - 1 and not lines[i + 1].startswith(" "):
# analyzer.sendline("\n") # going out of indentation mode .
# analyzer.expect_exact([">>>", "..."])
# alines.append("..." + analyzer.after.rstrip())
# pass
else:
alines.append( ("..." if in_dot_mode else ">>>") + analyzer.before.rstrip())
in_dot_mode = False
......
from collections import defaultdict
import os
from snipper.block_parsing import block_iterate
from snipper.snipper_main import full_strip
def get_s(lines):
    """Return snips from 'lines'.

    Collects all ``#!s`` blocks, grouped by snippet name, and returns a dict
    mapping each name to the concatenated list of block lines.
    """
    blocks = defaultdict(list)
    for c in block_iterate(lines, "#!s"):
        # Unless the start tag says keeptags, strip the #!s markers from the block.
        if not c['start_tag_args'].get('keeptags', False):
            c['block'] = full_strip(c['block'])
        blocks[c['name']].append(c)
    output = {}
    for name, co in blocks.items():
        # Concatenate all blocks that share the same snippet name.
        output[name] = [l for c in co for l in c['block']]
    return output
# def _s_block_process():
#
# pass
def save_s(lines, output_dir, file_path):  # save file snips to disk
    """Extract ``#!s`` snippets from *lines* and write each to *output_dir*.

    Each snippet is saved as ``<basename>_<name>.py``; when ``file_path`` is
    given it is also embedded as a header comment in the snippet.
    """
    content = get_s(lines)
    # Only create the output dir when there is actually something to write.
    if len(content) > 0 and not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # NOTE(review): file_path=None will crash at os.path.basename below --
    # confirm all callers pass a real path when snippets exist.
    for name, ll in content.items():
        if file_path is not None:
            file_path = file_path.replace("\\", "/")
            ll = [f"# {file_path}"] + ll
        out = "\n".join(ll)
        with open(output_dir + "/" + os.path.basename(file_path)[:-3] + ("_" + name if len(name) > 0 else name) + ".py", 'w') as f:
            f.write(out)
......
import os
import io
# from coursebox.core.info_paths import get_paths
from pybtex import plugin
from pybtex.database.input import bibtex
from warnings import warn
### Newstyle loading.
def get_aux(auxfile):
# paths = get_paths()
# auxfile = os.path.join(paths['02450public'], auxfile)
if not os.path.exists(auxfile):
print(auxfile)
from warnings import warn
warn("Could not find file")
# print(auxfile)
warn("Could not find bibtex file: "+ auxfile)
return {}
with open(auxfile, 'r') as f:
......@@ -104,17 +103,7 @@ def get_bibtex(bibfile):
'filename': url,
}
# newref = {}
# ls = lambda x: x if isinstance(x, list) else [x]
# if 'tex_command' in gi:
# for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ):
# ax = parse_aux(aux, bibtex=gi['bibtex'])
# for k in ax:
# ax[k]['pyref'] = display%(ax[k]['nicelabel'],)
# newref[cmd] = ax
return refs#, newref
return refs
def find_tex_cite(s, start=0, key="\\cite"):
......@@ -132,112 +121,112 @@ def find_tex_cite(s, start=0, key="\\cite"):
return (i, j), reference, txt
### Oldstyle loading
def get_references(bibfile, gi):
    """
    all references.

    Legacy ("oldstyle") loader: renders every entry of *bibfile* to HTML and
    plaintext via pybtex and returns ``(refs, newref)``.
    ``refs`` maps bibtex key -> dict with 'html', 'plain', 'label', 'filename'
    and 'references'; ``newref`` maps each command in gi['tex_command'] to the
    parsed aux entries for the matching aux file.
    Returns None when *bibfile* does not exist.
    """
    if not os.path.exists(bibfile):
        return None

    # Set up the pybtex alpha formatting style and both output backends.
    pybtex_style = plugin.find_plugin('pybtex.style.formatting', 'alpha')()
    pybtex_html_backend = plugin.find_plugin('pybtex.backends', 'html')()
    pybtex_plain_backend = plugin.find_plugin('pybtex.backends', 'plaintext')()
    pybtex_parser = bibtex.Parser()

    with open(bibfile, 'r', encoding='utf8') as f:
        data = pybtex_parser.parse_stream(f)

    data_formatted = pybtex_style.format_entries(data.entries.values())
    refs = {}

    # Cross-references from the aux file, if one is configured in gi.
    if 'auxfile' in gi:
        all_references = parse_aux(gi['auxfile'], bibtex=gi['bibtex'])
    else:
        all_references = {}

    for entry in data_formatted:
        # Render the same entry through both backends by redirecting their
        # output attribute into StringIO buffers.
        output = io.StringIO()
        output_plain = io.StringIO()
        pybtex_plain_backend.output = output_plain.write
        pybtex_html_backend.output = output.write
        pybtex_html_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_html_backend))
        pybtex_plain_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_plain_backend))
        html = output.getvalue()
        plain = output_plain.getvalue()
        entry.text.parts[-2].__str__()
        # Pull the URL out of the entry: it is the part following a \url part.
        url = ""
        for i,p in enumerate(entry.text.parts):
            if "\\url" in p.__str__():
                url = entry.text.parts[i+1]
                break
        url = url.__str__()
        # Rewrite the leftover \textbf span in the HTML into a plain <b> tag.
        i1 = html.find("\\textbf")
        i2 = html.find("</span>", i1)
        dht = html[i1:i2]
        dht = dht[dht.find(">")+1:]
        html = html[:i1] + " <b>"+dht+"</b> " + html[i2+7:]
        plain = plain.replace("\\textbf ", "")
        # Drop the trailing "URL ..." tail from the plaintext rendering.
        iu = plain.find("URL")
        if iu > 0:
            plain = plain[:iu]
        refs[entry.key] = {'html': html,
                           'plain': plain,
                           'label': entry.label,
                           'filename': url,
                           'references': all_references}

    # Build per-command reference tables from each configured aux file,
    # formatting the label with the matching display pattern.
    newref = {}
    ls = lambda x: x if isinstance(x, list) else [x]
    if 'tex_command' in gi:
        for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ):
            ax = parse_aux(aux, bibtex=gi['bibtex'])
            for k in ax:
                ax[k]['pyref'] = display%(ax[k]['nicelabel'],)
            newref[cmd] = ax

    return refs, newref
def parse_aux(auxfile, bibtex=None):
    """Parse a LaTeX ``.aux`` file and return a dict of cref labels.

    For every ``\\newlabel{key@cref}{{[label][..][..]number}...}`` line, the
    result maps ``key`` to a dict with 'pyref' (a ``\\cite`` string when
    *bibtex* is given, else None), 'nicelabel', 'rawlabel' and 'number'.
    Returns ``{}`` (with a warning) when the file does not exist.

    Note: the *bibtex* parameter shadows the module-level pybtex import inside
    this function; it is the bibtex key used to build the \\cite reference.
    """
    # paths = get_paths()
    paths = {}
    # os.path.join('', x) is x, so an unconfigured paths dict no longer raises
    # KeyError (previously paths['02450public'] always failed here).
    auxfile = os.path.join(paths.get('02450public', ''), auxfile)
    if not os.path.exists(auxfile):
        print(auxfile)
        from warnings import warn
        warn("Could not find file")
        return {}

    with open(auxfile, 'r') as f:
        items = f.readlines()
    entries = {}
    for e in items:
        e = e.strip()
        if e.startswith("\\newlabel") and "@cref" in e:
            # Key is the text between the first '{' and '@cref}'.
            i0 = e.find("{")
            i1 = e.find("@cref}")
            key = e[i0+1:i1]

            # Value looks like '[label][..][..]number' inside '{{[' ... '}'.
            j0 = e.find("{{[", i0)+3
            j1 = e.find("}", j0)
            val = e[j0:j1]

            label = val[:val.find("]")]
            number = val[val.rfind("]")+1:]

            if label == "equation":
                nlabel = f"eq. ({number})"
            else:
                nlabel = label.capitalize() + " " + number

            coderef = "\\cite[%s]{%s}"%(nlabel, bibtex) if bibtex is not None else None
            entries[key] = {'pyref': coderef, 'nicelabel': nlabel, 'rawlabel': label, 'number': number}
    return entries
# def get_references(bibfile, gi):
# """
# all references.
# """
# if not os.path.exists(bibfile):
# return None
#
# pybtex_style = plugin.find_plugin('pybtex.style.formatting', 'alpha')()
# pybtex_html_backend = plugin.find_plugin('pybtex.backends', 'html')()
# pybtex_plain_backend = plugin.find_plugin('pybtex.backends', 'plaintext')()
# pybtex_parser = bibtex.Parser()
#
# with open(bibfile, 'r', encoding='utf8') as f:
# data = pybtex_parser.parse_stream(f)
#
# data_formatted = pybtex_style.format_entries(data.entries.values())
# refs = {}
#
# if 'auxfile' in gi:
# all_references = parse_aux(gi['auxfile'], bibtex=gi['bibtex'])
# else:
# all_references = {}
#
# for entry in data_formatted:
# output = io.StringIO()
# output_plain = io.StringIO()
# pybtex_plain_backend.output = output_plain.write
# pybtex_html_backend.output = output.write
# pybtex_html_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_html_backend))
#
# pybtex_plain_backend.write_entry(entry.key, entry.label, entry.text.render(pybtex_plain_backend))
#
# html = output.getvalue()
# plain = output_plain.getvalue()
#
# entry.text.parts[-2].__str__()
# url = ""
# for i,p in enumerate(entry.text.parts):
# if "\\url" in p.__str__():
# url = entry.text.parts[i+1]
# break
# url = url.__str__()
# i1 = html.find("\\textbf")
# i2 = html.find("</span>", i1)
# dht = html[i1:i2]
# dht = dht[dht.find(">")+1:]
# html = html[:i1] + " <b>"+dht+"</b> " + html[i2+7:]
#
# plain = plain.replace("\\textbf ", "")
# iu = plain.find("URL")
# if iu > 0:
# plain = plain[:iu]
#
# refs[entry.key] = {'html': html,
# 'plain': plain,
# 'label': entry.label,
# 'filename': url,
# 'references': all_references}
#
# newref = {}
# ls = lambda x: x if isinstance(x, list) else [x]
# if 'tex_command' in gi:
# for cmd, aux, display in zip( ls(gi['tex_command']), ls(gi['tex_aux'] ), ls( gi['tex_display'] ) ):
# ax = parse_aux(aux, bibtex=gi['bibtex'])
# for k in ax:
# ax[k]['pyref'] = display%(ax[k]['nicelabel'],)
# newref[cmd] = ax
#
# return refs, newref
#
#
# def parse_aux(auxfile, bibtex=None):
# # paths = get_paths()
# paths = {}
# auxfile = os.path.join(paths['02450public'], auxfile)
# if not os.path.exists(auxfile):
# print(auxfile)
# from warnings import warn
# warn("Could not find file")
# return {}
#
# with open(auxfile, 'r') as f:
# items = f.readlines()
# entries = {}
# for e in items:
# e = e.strip()
# if e.startswith("\\newlabel") and "@cref" in e:
# # print(e)
# i0 = e.find("{")
# i1 = e.find("@cref}")
# key = e[i0+1:i1]
#
# j0 = e.find("{{[", i0)+3
# j1 = e.find("}", j0)
#
# val = e[j0:j1]
#
# label = val[:val.find("]")]
# number = val[val.rfind("]")+1:]
#
# if label == "equation":
# nlabel = f"eq. ({number})"
# else:
# nlabel = label.capitalize() + " " + number
#
# coderef = "\\cite[%s]{%s}"%(nlabel, bibtex) if bibtex is not None else None
# entries[key] = {'pyref': coderef, 'nicelabel': nlabel, 'rawlabel': label, 'number': number}
# return entries
......@@ -3,13 +3,21 @@ from snipper.snipper_main import censor_file
from pathlib import Path
import time
import fnmatch
import tempfile
def snip_dir(source_dir, # Sources
dest_dir, # Will write to this directory
dest_dir=None, # Will write to this directory
output_dir=None, # Where snippets are going to be stored
references=None, # Reference database
exclude=None, clean_destination_dir=True):
exclude=None, clean_destination_dir=True,
run_files=True, # Run #!o tags and #!i tags
cut_files=True, # censor files.
license_head=None,
):
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
print("[snipper]", "no destination dir was specified so using nonsense destination:", dest_dir)
if references == None:
references = dict(aux=None, bibtex=None, commands=[])
......@@ -17,14 +25,16 @@ def snip_dir(source_dir, # Sources
if exclude == None:
exclude = []
exclude += ["*__pycache__*"] # Just...no.
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir = os.path.abspath(output_dir)
source_dir = os.path.abspath(source_dir)
dest_dir = os.path.abspath(dest_dir)
if output_dir == None:
output_dir = os.path.dirname(source_dir) + "/output"
output_dir = os.path.abspath(output_dir)
if os.path.samefile( source_dir, dest_dir):
raise Exception("Source and destination is the same")
......@@ -33,15 +43,14 @@ def snip_dir(source_dir, # Sources
os.makedirs(dest_dir)
out = dest_dir
hw = {'base': source_dir,
'exclusion': exclude}
hw = {'base': source_dir}
print(f"[snipper] Synchronizing directories: {hw['base']} -> {out}")
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
shutil.copytree(source_dir, dest_dir)
time.sleep(0.2)
time.sleep(0.1)
ls = list(Path(dest_dir).glob('**/*.*'))
acceptable = []
......@@ -50,6 +59,10 @@ def snip_dir(source_dir, # Sources
m = [fnmatch.fnmatch(split, ex) for ex in exclude]
acceptable.append( (l, not any(m) ))
# for f,ac in acceptable:
# if not ac:
# print(f)
# print(acceptable)
# now we have acceptable files in list.
# run_out_dirs = ["./output"]
......@@ -58,7 +71,7 @@ def snip_dir(source_dir, # Sources
# edirs = {os.path.normpath(os.path.dirname(f_) if not os.path.isdir(f_) else f_) for f_ in edirs}
# edirs.remove(os.path.normpath(out))
for f, accept in acceptable:
if os.path.isdir(f) or not str(f).endswith(".py"): # We only touch .py files.
if os.path.isdir(f) or not str(f).endswith(".py") or str(f).endswith("_grade.py"): # We only touch .py files.
continue
# f_dir = os.path.normpath(f if os.path.isdir(f) else os.path.dirname(f))
if accept:
......@@ -70,15 +83,18 @@ def snip_dir(source_dir, # Sources
# if "assignments" in str(f) and "_grade.py" in str(f):
# continue
info = {'new_references': [], 'code_copyright': 'Example student code. This file is automatically generated from the files in the instructor-directory'}
# info = {'new_references': [], 'code_copyright': 'Example student code. This file is automatically generated from the files in the instructor-directory'}
# paths = {}
solution_list = []
kwargs = {}
cut_files = True
run_files = True
# cut_files = True
# copyright()
# run_files = True
nrem = censor_file(f, run_files=run_files, run_out_dirs=output_dir, cut_files=cut_files, solution_list=solution_list,
base_path=dest_dir,
references=references,
license_head=license_head,
**kwargs)
if nrem > 0:
print(f"{nrem}> {f}")
......
......@@ -33,7 +33,8 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio
censor_files=True,
base_path=None,
strict=True,
references=None):
references=None,
license_head=None):
if references == None:
references = {}
......@@ -55,7 +56,7 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio
print("Error in file, cite/reference tag not found!>", file)
raise e
if run_files or cut_files:
if (run_files or cut_files) and run_out_dirs is not None:
ofiles = []
for rod in [run_out_dirs]:
ofiles.append(os.path.join(rod, os.path.basename(file).split(".")[0]) )
......@@ -96,12 +97,20 @@ def censor_file(file, run_files=True, run_out_dirs=None, cut_files=True, solutio
# with open(sout, "w") as f:
# f.write(sol)
if len(lines[-1])>0:
if len(lines) > 0 and len(lines[-1])>0:
lines.append("")
s2 = "\n".join(lines)
if license_head is not None:
s2 = fix_copyright(s2, license_head)
with open(file, 'w', encoding='utf-8') as f:
f.write(s2)
return nB
# lines: 294, 399, 420, 270
def fix_copyright(s, license_head):
    """Prepend *license_head* to source text *s* as '# '-prefixed comment lines."""
    commented = []
    for raw in license_head.splitlines():
        commented.append("# " + raw.strip())
    header = "\n".join(commented)
    return header + "\n" + s
# lines: 294, 399, 420, 116
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment