import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.probability import FreqDist

# Download the required NLTK resources (tokenizer data, stopword list, POS tagger models)
nltk.download('punkt_tab')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('averaged_perceptron_tagger_eng')

text = "natural language processing makes computer understand human language"

# Tokenization: split the text into individual words
tokens = word_tokenize(text)
print("Tokens:", tokens)

# Stopword removal: drop common English words that carry little meaning
stop_words = set(stopwords.words('english'))
filtered_words = [word for word in tokens if word.lower() not in stop_words]
print("Filtered words:", filtered_words)

# Word frequency: count how often each remaining word occurs
freq_dist = FreqDist(filtered_words)
print("Word Frequency:", freq_dist)

# POS tagging: label each token with its part of speech
pos_tags = nltk.pos_tag(tokens)
print("POS Tags:", pos_tags)
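
# Optional follow-up (a minimal sketch, assuming the variables above are in scope):
# printing a FreqDist only shows a one-line summary, so most_common()
# (inherited from collections.Counter) can be used to see the actual counts.
print("Top words:", freq_dist.most_common(5))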