max_tweeted_word_final(2).py
# final build
# for users with more than 4000 tweets, not all tweets are pulled, due to equipment limitations
# uses 2 index-matched lists (words and their counts) -- a dictionary-based version is planned (sketched below)
#
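# A hypothetical helper sketching the planned dictionary-based counter (not called
# anywhere below): a single dict maps each word to its count, replacing the two
# index-matched lists, with the same filtering rules (skip filtered words and @mentions).
def count_words_with_dict(texts, words_to_filter):
    counts = {}
    for text in texts:
        for word in text.lower().split():
            if word in words_to_filter or word.startswith('@'):
                continue
            counts[word] = counts.get(word, 0) + 1
    return counts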
import tweepy
# Twitter API credentials -- placeholders, replace with your own app's keys
consumer_key = 'CONSUMER_KEY'
consumer_secret = 'CONSUMER_SECRET'
access_token = 'ACCESS_TOKEN'
access_token_secret = 'ACCESS_TOKEN_SECRET'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)  # sleep instead of erroring when rate limited
i = 1
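# Each cycle handles up to 9 followers that have not been scanned in an earlier
# cycle: their most tweeted word is found and tweeted back at them.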
while i < 15:
    print('cycle ' + str(i))
    users_list = []
    followers_list = []
    already_scanned = []
    followers_doc = open('followers_page.txt')
    for line in followers_doc:
        already_scanned.extend(line.split())  # screen names already handled in earlier cycles
        #print(already_scanned)
    home_user = 'maxTweetedWord'
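    # fetch the home account's followers; protected accounts are skipped because
    # their timelines cannot be read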
    for follower in api.followers(screen_name=home_user, count=api.get_user(home_user).followers_count):
        if not follower.protected:
            followers_list.append(follower.screen_name)
    followers_doc_w = open('followers_page.txt', 'a')
    for follower in reversed(followers_list):
        if follower not in already_scanned:
            if len(users_list) < 9:  # handle at most 9 new followers per cycle
                users_list.append(follower)
                followers_doc_w.write(follower + ' ')
    words_to_filter = []
    filtering = open('words_to_filter.txt')  # making the filtering list of words to ignore
    for line in filtering:
        words_to_filter.extend(line.split())
    filtering.close()
    print(users_list)
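    # for each selected follower: pull their full timeline, count every word, and
    # tweet the most frequent one back at them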
    for user in users_list:
        words_list = []  # index-matched lists: unique words and their counts
        words_count_list = []
        print(user)
        all_tweets = []  # the loop below pages back through the timeline to get ALL tweets; a single call tops out at 200
        statuses_count = api.get_user(user).statuses_count
        print(statuses_count)
        target_timeline = api.user_timeline(screen_name=user, count=200)
        all_tweets.extend(target_timeline)
        if not all_tweets:  # user has no readable tweets
            continue
        oldest_id = all_tweets[-1].id - 1
        while len(target_timeline) > 0:
            target_timeline = api.user_timeline(screen_name=user, count=50, max_id=oldest_id)
            all_tweets.extend(target_timeline)
            oldest_id = all_tweets[-1].id - 1
        print(len(all_tweets))
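        # (tweepy.Cursor(api.user_timeline, screen_name=user).items() is an
        # alternative way to page through a timeline.)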
        for tweet in all_tweets:  # main block: scan and count every word
            content = tweet.text.split()
            for word in content:
                word = word.lower()  # lowercase everything so YES, Yes and yes are not counted as different words
                if word not in words_list:
                    if word not in words_to_filter:  # filtering
                        if word[0] != '@':  # skip @mentions
                            words_list.append(word)
                            words_count_list.append(1)
                else:
                    index = words_list.index(word)
                    words_count_list[index] += 1  # increment count
        if not words_count_list:  # every word was filtered out; nothing to report
            continue
        words_count_list_reversed = sorted(words_count_list, reverse=True)  # sort descending to find the maximum count
        most_tweeted_index = words_count_list.index(words_count_list_reversed[0])  # the max count's index gives the corresponding word
        most_tweeted = words_list[most_tweeted_index]
        print(user + " 's most tweeted word is " + most_tweeted + " (" + str(words_count_list_reversed[0]) + ")")
        api.update_status(status=("@" + user + " 's most tweeted word is " + most_tweeted + " (" + str(words_count_list_reversed[0]) + ")"))
    followers_doc_w.close()
    followers_doc.close()
    i += 1