import glob
import os
import sys

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
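# NOTE: word_tokenize and stopwords depend on NLTK data packages; if they are
# missing, run nltk.download("punkt") and nltk.download("stopwords") once
# (newer NLTK releases may also need the "punkt_tab" package).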


def file_summary(file_path: str, out_dir: str):
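    """Strip English stopwords from the file at file_path and write the
    result (the "summary") to out_dir under the same file name.
    """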

    # read the file given as an argument; the with-statement closes the handle
    with open(file_path, "r", encoding="utf8", errors="ignore") as f:
        text = f.read()
    # split the text into word tokens
    words = word_tokenize(text)
    # remove English stopwords (lowercased so capitalized tokens match too)
    stop_words = set(stopwords.words("english"))
    filtered_words = [w for w in words if w.lower() not in stop_words]
    # build the summary by rejoining the filtered words
    summary = " ".join(filtered_words)
    # print the summary
    print(summary)
    # write the summary to a file of the same name in the output directory
    summary_file = os.path.join(out_dir, os.path.basename(file_path))
    print(f"output: {summary_file}")

    with open(summary_file, "w", encoding="utf8", errors="ignore") as f:
        f.write(summary)


def main():
    # the first CLI argument is a directory containing the .txt files to process
    if len(sys.argv) < 2:
        sys.exit(f"usage: {sys.argv[0]} <input_dir>")
    files = glob.glob(os.path.join(sys.argv[1], "*.txt"))
    # summaries go into a sibling directory named <input_dir>_sum
    sums_dir = sys.argv[1] + "_sum"
    os.makedirs(sums_dir, exist_ok=True)
    for item in files:
        print(f"processing: {item}")
        file_summary(item, sums_dir)


if __name__ == "__main__":
    main()
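# Example usage (the script and directory names are only illustrative):
#   $ python file_summary.py docs
# Each docs/*.txt file gets a stopword-stripped copy written to docs_sum/.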
