-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
3 changed files
with
110 additions
and
58 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
# Standard library
import ast
import re
import string

# Third-party
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import streamlit as st
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from wordcloud import WordCloud
|
||
nltk.download("omw-1.4") | ||
nltk.download("punkt") | ||
nltk.download("stopwords") | ||
nltk.download("wordnet") | ||
|
||
# App header.
st.title('arXivfy me')

# Shared text-processing objects; the helper functions reference these
# module-level names, so they must keep exactly these identifiers.
stpwrds = set(stopwords.words("english"))  # English stopwords for filtering
vectorizer = TfidfVectorizer()             # TF-IDF vectorizer used by vectorize()
stemmer = PorterStemmer()                  # Porter stemmer used by stem()
|
||
# Load the arXiv metadata dump with pandas; the author/link/tag columns
# arrive as strings that encode Python literals (lists/dicts).
df_pandas = pd.read_json('arxivData.json')

# Parse the stringified columns into real Python objects.
# ast.literal_eval accepts only literals, unlike eval(), which would
# execute arbitrary code embedded in the data file.
for key in ["author", "link", "tag"]:
    df_pandas[key] = df_pandas[key].agg(ast.literal_eval, axis=0)
|
||
# Preview the parsed frame in the app (a bare .head() is a no-op in a script).
st.write(df_pandas.head())

# Clean/tokenize every paper summary; clean() comes from the helper module.
tokens = df_pandas["summary"].agg(clean)
df_pandas["tokens"] = tokens
# Comma-joined string form of each token list (stable text representation).
df_pandas['tokens_str'] = df_pandas['tokens'].apply(lambda x: ','.join(map(str, x)))

# Single space-separated corpus for the word cloud, rendered further below.
# (The original called show_wordcloud(text) here, which raised TypeError:
# show_wordcloud requires a max_words argument; the render happens once,
# with the slider, at the end of the script.)
text = " ".join(summ for summ in df_pandas.tokens_str.astype(str))
|
||
# Let the user pick the word budget, then render the cloud of the corpus.
# The original passed the slider value as the *data* argument by mistake,
# drawing a cloud of the slider's number instead of the summaries.
max_words = st.slider('max_words', 5, 500, 200, step=10)
fig = show_wordcloud(text, max_words)
st.pyplot(fig)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,63 @@ | ||
import pandas as pd | ||
import numpy as np | ||
import streamlit as st | ||
from nltk import word_tokenize | ||
from nltk.util import ngrams | ||
from sklearn.feature_extraction.text import CountVectorizer | ||
import string | ||
import re | ||
from nltk.corpus import stopwords | ||
from wordcloud import WordCloud | ||
import matplotlib.pyplot as plt | ||
|
||
def get_top_ngram(corpus, n=None, top_k=10):
    """Return the top_k most frequent n-grams in corpus as (ngram, count) pairs.

    Parameters
    ----------
    corpus : iterable of str
        Documents to count n-grams over.
    n : int, optional
        N-gram size. Treated as 1 (unigrams) when omitted — the original
        default of None crashed inside CountVectorizer(ngram_range=(None, None)).
    top_k : int, optional
        How many of the most frequent n-grams to return (default 10,
        matching the previously hard-coded slice).
    """
    size = 1 if n is None else n
    vec = CountVectorizer(ngram_range=(size, size)).fit(corpus)
    bag_of_words = vec.transform(corpus)
    # Column sums = total count of each vocabulary entry across all docs.
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx])
                  for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
    return words_freq[:top_k]
|
||
def remove_linebreaks(s):
    """Return *s* with every newline character turned into a single space."""
    return " ".join(s.split("\n"))
|
||
def tokenize(s):
    """Split *s* into word tokens using NLTK's English tokenizer."""
    lang = "english"
    return word_tokenize(s, language=lang)
|
||
def remove_stopwords(s):
    """Drop every token of *s* that appears in the module-level stopword set."""
    kept = []
    for tok in s:
        if tok not in stpwrds:
            kept.append(tok)
    return kept
|
||
def stem(s):
    """Lower-case and Porter-stem each token, returning one space-joined string."""
    stems = (stemmer.stem(tok.lower()) for tok in s)
    return " ".join(stems)
|
||
def vectorize(s):
    """Fit the shared TF-IDF vectorizer on *s* and return the document-term matrix."""
    matrix = vectorizer.fit_transform(s)
    return matrix
|
||
def clean(s):
    """Normalize a raw summary string into a list of lowercase tokens.

    Pipeline: strip digits, strip punctuation while lower-casing, collapse
    whitespace, tokenize, then drop English stopwords. Stemming is left
    disabled so the word cloud shows readable words.
    """
    s = re.sub(r'\d+', '', s)  # remove numbers
    # Remove punctuation and lower-case in a single pass.
    s = "".join([char.lower() for char in s if char not in string.punctuation])
    # Raw string: '\s' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
    s = re.sub(r'\s+', ' ', s).strip()
    # Effectively a no-op here (the \s+ collapse above already replaced
    # newlines); kept for parity with the original pipeline.
    s = remove_linebreaks(s)
    s = tokenize(s)
    s = remove_stopwords(s)
    # s = stem(s)  # optional stemming step, intentionally disabled
    return s
|
||
|
||
def show_wordcloud(data, max_words=100):
    """Build a word-cloud figure from *data* and return it for st.pyplot().

    Parameters
    ----------
    data : str or object
        Text to visualize (coerced with str()).
    max_words : int, optional
        Maximum number of words in the cloud. The original hard-coded 100
        and silently ignored this parameter; it now takes effect, with the
        old value as the backward-compatible default.

    Returns
    -------
    matplotlib.figure.Figure
        The rendered figure. The original returned None, which broke the
        `fig = show_wordcloud(...); st.pyplot(fig)` call site.
    """
    cloud = WordCloud(
        background_color='white',
        # The original passed the nltk `stopwords` corpus-reader object;
        # WordCloud expects an iterable of stopword strings.
        stopwords=set(stopwords.words("english")),
        max_words=max_words,
        max_font_size=30,
        scale=3,
        random_state=1)

    output = cloud.generate(str(data))

    fig = plt.figure(1, figsize=(12, 12))
    plt.axis('off')
    plt.imshow(output)
    # Return the figure instead of plt.show(): Streamlit renders via
    # st.pyplot(fig), and show() would block/no-op in a server context.
    return fig