diff --git a/exercises/02450Toolbox_Python/Scripts/ex3_1_3.py b/exercises/02450Toolbox_Python/Scripts/ex3_1_3.py
index 8dbc103785c16864f67a6bc1bce846eb6332f3c3..c3ac96c2ad678edc8786ea59973292ad3b80a9e1 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex3_1_3.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex3_1_3.py
@@ -3,9 +3,7 @@ import importlib_resources
 from sklearn.feature_extraction.text import CountVectorizer
 
 filename_docs = importlib_resources.files("dtuimldmtools").joinpath("data/textDocs.txt")
-filename_stop = importlib_resources.files("dtuimldmtools").joinpath(
-    "stopWords.txt"
-)
+filename_stop = importlib_resources.files("dtuimldmtools").joinpath("data/stopWords.txt")
 
 # As before, load the corpus and preprocess:
 with open(filename_docs, "r") as f:
diff --git a/exercises/02450Toolbox_Python/Scripts/ex3_1_4.py b/exercises/02450Toolbox_Python/Scripts/ex3_1_4.py
index b71c5b3396b701e04ae7f7be58b5219dc42ede43..e33ee98041ad4eddb1533e2553763c881c2e5e73 100644
--- a/exercises/02450Toolbox_Python/Scripts/ex3_1_4.py
+++ b/exercises/02450Toolbox_Python/Scripts/ex3_1_4.py
@@ -9,9 +9,7 @@ from nltk.stem import PorterStemmer
 from sklearn.feature_extraction.text import CountVectorizer
 
 filename_docs = importlib_resources.files("dtuimldmtools").joinpath("data/textDocs.txt")
-filename_stop = importlib_resources.files("dtuimldmtools").joinpath(
-    "stopWords.txt"
-)
+filename_stop = importlib_resources.files("dtuimldmtools").joinpath("data/stopWords.txt")
 
 # As before, load the corpus and preprocess:
 with open(filename_docs, "r") as f: