PythonによるTwitterのスクレイピングについて
このサイトを参考にスクレイピングを行っているのですが、このままだとプロフィールからタイムラインで取得できる情報のみで取得期間が3か月程度になってしまいます。ユーザーの全ツイートを取得するために
Python3
1# -*- coding: utf-8 -*- 2import sys 3import os 4import bs4 5import json 6import urllib 7import requests 8import re 9from da_vinci import Image 10from time import sleep 11 12crawle_user_id = '' 13 14def outputCsv(screen_name, user_name, tweet_timestamp, tweet_body, alfa, beta): 15 file_name = 'output.csv' 16 17 screen_name = screen_name.replace('\n','').replace('\r','') 18 user_name = user_name.replace('\n','').replace('\r','') 19 tweet_body = tweet_body.replace('\n','').replace('\r','') 20 21 f = open(file_name, 'a') 22 f.write(screen_name + ',' + user_name + ',' + tweet_timestamp + ',' + tweet_body + ',' + alfa + ',' + beta + "\n") 23 f.close() 24 25 26def parseHtml(html_data): 27 soup = bs4.BeautifulSoup(html_data) 28 29 tweet_blocks = soup.select('.tweet') 30 31 for tweet_block in tweet_blocks: 32 screen_name = tweet_block.select('.fullname')[0].text 33 user_name = tweet_block.select('.username')[0].text 34 tweet_timestamp = tweet_block.select('.tweet-timestamp')[0].text 35 tweet_body = tweet_block.select('.tweet-text')[0].text 36 alfa = tweet_block.select('.ProfileTweet-actionCountForPresentation')[1].text 37 beta = tweet_block.select('.ProfileTweet-actionCountForPresentation')[3].text 38 39 al = str(alfa) 40 41 if al == "0": 42 print("exceping") 43 else: 44 print("accepting") 45 outputCsv(screen_name, user_name, tweet_timestamp, tweet_body, alfa, beta) 46 47def getMinPosition(html_data): 48 soup = bs4.BeautifulSoup(html_data) 49 50 min_position = soup.select('div[data-min-position]')[0].attrs['data-min-position'] 51 52 return min_position 53 54def getNextTweet(user_id, max_position): 55# base_url = 'https://twitter.com/'+ user_id +'/status/'+ max_position +'&reset_error_state=false' 56 base_url = 'https://twitter.com/'+ user_id + '/status/timeline/tweets?include_available_features=1&include_entities=1&max_position='+ max_position +'&s$ 57 url_data = urllib.request.urlopen(base_url) 58 59 content = json.loads(url_data.read().decode('utf8')) 60 61 next_position = content['min_position'] 62 63 parseHtml(content['items_html']) 64 65 getNextTweet(user_id, next_position) 66 67 68def getFirstTweet(user_id): 69 url_data = urllib.request.urlopen('https://twitter.com/' + user_id) 70 71 html_data = url_data.read() 72 73 parseHtml(html_data) 74 75 max_position = getMinPosition(html_data) 76 77 getNextTweet(crawle_user_id, max_position) 78 79getFirstTweet(crawle_user_id)
のようにしたのですが、
collectt2.py:48: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("lxml"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 48 of the file collectt2.py. To get rid of this warning, pass the additional argument 'features="lxml"' to the BeautifulSoup constructor.
soup = bs4.BeautifulSoup(html_data)
Traceback (most recent call last):
File "collectt2.py", line 80, in <module>
getFirstTweet(crawle_user_id)
File "collectt2.py", line 78, in getFirstTweet
getNextTweet(crawle_user_id, max_position)
File "collectt2.py", line 58, in getNextTweet
url_data = urllib.request.urlopen(base_url)
File "/usr/lib/python3.5/urllib/request.py", line 163, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.5/urllib/request.py", line 472, in open
response = meth(req, response)
File "/usr/lib/python3.5/urllib/request.py", line 582, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python3.5/urllib/request.py", line 510, in error
return self._call_chain(*args)
File "/usr/lib/python3.5/urllib/request.py", line 444, in _call_chain
result = func(*args)
File "/usr/lib/python3.5/urllib/request.py", line 590, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 404: Not Found
とエラーが出ます
どうすればよいのでしょうか
回答2件
あなたの回答
tips
プレビュー
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。
2018/12/09 11:10