# NOTE(review): removed Gitee repository-sync boilerplate ("代码拉取完成...")
# that was accidentally captured into this file from the hosting web page;
# it was not part of the script and prevented it from parsing.
#!/usr/bin/env python
# coding: utf-8
# In[10]:
import numpy as np
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import random
# In[ ]:
#analysing tweets from the corpus
# In[14]:
# Load the labelled tweet corpora shipped with NLTK and build a combined list.
positive_tweets = twitter_samples.strings('positive_tweets.json')
# In[15]:
negative_tweets = twitter_samples.strings('negative_tweets.json')
# In[16]:
all_tweets = positive_tweets + negative_tweets
# In[17]:
# Analysing sample tweets: print one positive tweet chosen at random.
# BUGFIX: the original random.randint(0, 5000) could return 5000, which is
# out of range for the 5000-element list (valid indices 0..4999) and would
# raise IndexError; random.choice cannot go out of bounds.
print(random.choice(positive_tweets))
# In[19]:
""" There are 4 basic steps in pre-processing of any text
1.Tokenizing
2.Removing hyper links if any
3.Converting to lower case
4.Removing punctuations
5.steeming of the word"""
import re
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
# In[20]:
# Strip tweet artefacts from one sample tweet, in order: the leading "RT"
# retweet marker, any hyperlinks, and the '#' symbol (the hashtag word
# itself is kept).
tweet = all_tweets[1]
for pattern in (r'^RT[\s]+', r'https?:\/\/.*[\r\n]*', r'#'):
    tweet = re.sub(pattern, '', tweet)
print(tweet)
# In[22]:
# Split the cleaned tweet into lower-cased tokens. @handles are stripped and
# runs of repeated characters are shortened (reduce_len).
tweet_tok = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
tokens = tweet_tok.tokenize(tweet)
print(tokens)
# In[23]:
# English stop words and ASCII punctuation that will be filtered out of the
# token list in the next step.
stoper = stopwords.words('english')
punct = string.punctuation
for collection in (stoper, punct):
    print(collection)
# In[24]:
# Keep only the tokens that are neither English stop words nor punctuation.
# BUGFIX: the original for/if/append loop had lost its indentation and would
# not parse; a list comprehension expresses the same filter directly.
cleaned = [tok for tok in tokens if tok not in stoper and tok not in punct]
print(cleaned)
# In[25]:
# Reduce each surviving token to its stem with the Porter algorithm
# (e.g. "running" -> "run").
# BUGFIX: the original for/append loop had lost its indentation and would
# not parse; a list comprehension performs the same mapping.
stemmer = PorterStemmer()
processed = [stemmer.stem(tok) for tok in cleaned]
print(processed)
# In[ ]:
# NOTE(review): removed Gitee content-moderation boilerplate ("此处可能存在...")
# accidentally captured from the hosting web page; not part of the script.