STOPWORDS

lexnlp.nlp.en.tokens.STOPWORDS = {"isn't", "should've", 516, 519, 'needn', 'doing', 'any', 'before', 535, 'whom', 'further', 'themselves', 4136, 'because', 1068, 'same', 'nor', 'those', 'this', 'o', 'from', 'here', 'all', "it's", 'these', 'where', 'just', 'can', 1604, "weren't", 5195, "couldn't", 'yourself', 'both', 'being', 'off', 597, 'wouldn', 'only', "hasn't", 'weren', 'which', 'on', 'hers', 'such', 'you', 'for', 'me', 'into', 'as', 'in', "doesn't", 'have', 1660, 1661, 'if', 'should', 'she', 'we', 'them', 'won', 'a', 671, 'when', 'most', 'above', "aren't", 'himself', 'that', 'd', 'hasn', 'once', 2227, 'how', 'of', 696, 'was', 'not', 'theirs', 'he', "you'll", 'its', 2252, 'had', 'does', 'our', 'while', 'itself', 'having', 'will', 'there', 'yours', 'him', 'haven', 'and', 'what', 756, 'is', "you've", 'at', 'they', 'his', 'herself', 'to', 'during', 'myself', "haven't", 3334, 'are', 'aren', 'so', 'been', 'am', 'do', "hadn't", 's', 'their', 'doesn', 'through', "don't", 'over', 'between', 'm', 'hadn', "won't", 822, "mustn't", "shan't", 'shouldn', 'isn', 'very', 'against', 'my', 'after', 'by', 'why', "wouldn't", 'wasn', 'ain', 'your', 'own', "that'll", 363, 'too', 368, 'some', 'her', 'an', 'other', 'then', 'or', 378, 380, 'about', 'the', 'has', 'ourselves', 'mightn', 'didn', 'ma', "needn't", 'll', 'no', 'until', 1947, 414, 'now', 'did', 'who', "she's", 'below', 'y', "you'd", 'up', 'than', 'mustn', 'yourselves', 444, 958, 'down', 't', 'it', 454, 'with', 457, 458, 'again', 've', "you're", 465, 466, 'but', 'be', 'out', 're', 981, 'i', 482, 'were', 'under', 485, 'ours', "mightn't", 1512, 'couldn', 'more', "didn't", 'shan', 492, 1522, 'don', 'each', 'few', "shouldn't", "wasn't"}

set() -> new empty set object
set(iterable) -> new set object

Build an unordered collection of unique elements.