A worked example of scraping Weibo comments with Python


Web scraping is a skill most Python programmers pick up at some point, and Weibo is a popular practice target. Because Weibo is served through several different front ends, the scraping difficulty varies with which one you target, so it suits both newcomers to Python and experienced programmers looking for practice. This article walks through code examples for scraping Weibo comments with Python.

1. Crawling Weibo

As with a QQzone crawler, you can scrape a Sina Weibo user's profile information, posts, followers, followees, and comments.

A well-tuned crawler can reportedly fetch upwards of 13 million items per day, depending on network conditions.

In order of scraping difficulty: desktop site > mobile web site > mobile API site. The mobile site (m.weibo.cn), which this article uses, is the easiest to crawl.
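For example, the mobile endpoint used later in this article serves comments as plain JSON, so no HTML parsing is needed. A minimal check, where '<post id>' is a placeholder for a real post id:

import requests

url = 'https://m.weibo.cn/api/comments/show?id=<post id>&page=1'
r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
print(r.json()['data']['data'][0]['text'])  # structured JSON, no HTML parsing needed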

2. Scraping Weibo comments with a Python crawler

Step 1: determine the id of the post whose comments you want

# -*- coding:utf-8 -*-
import requests
import re
import time
import pandas as pd

# Comment API of the mobile site. The concrete post id from the original
# article was lost in formatting, so '<post id>' is a placeholder.
urls = 'https://m.weibo.cn/api/comments/show?id=<post id>&page={}'
headers = {'User-Agent': 'Mozilla/5.0'}  # headers assumed; get_comment() below needs them
Step 2: compile a regular expression for stripping HTML tags

tags = re.compile('</?\w+[^>]*>')  # pattern reconstructed; the original line was garbled here
Step 3: request the comment API and parse the JSON response

comments, ids = [], []  # result lists (their initialization was lost in the original)

def get_comment(url):
    j = requests.get(url, headers=headers).json()
    comment_data = j['data']['data']
    for data in comment_data:
        try:

Step 4: strip the HTML tags from the text with the regular expression

            comment = tags.sub('', data['text'])  # strip html tags from the comment
            reply = tags.sub('', data['reply_text'])
            weibo_id = data['id']
            reply_id = data['reply_id']
            comments.append(comment)
            comments.append(reply)
            ids.append(weibo_id)
            ids.append(reply_id)
        except KeyError:  # comments that are not replies lack 'reply_text'/'reply_id'
            pass
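A quick sanity check of the reconstructed tag-stripping pattern on a made-up comment string:

sample = '<a href="/u/123">@someone</a> nice post<img src="x.png"/>'
print(tags.sub('', sample))  # -> @someone nice post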

Step 5: crawl the comments and save the results
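The loop that actually pages through the API was lost in the original formatting; below is a minimal sketch under that assumption, reusing urls and get_comment() from the steps above (the cap of 100 pages is arbitrary):

for page in range(1, 101):
    try:
        get_comment(urls.format(page))
    except KeyError:  # assuming the API stops returning 'data' once pages run out
        break
    time.sleep(1)  # throttle requests (uses the time import from Step 1)

With all pages collected, deduplicate and save: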

df = pd.DataFrame({'ID': ids, '评论': comments})
df = df.drop_duplicates()
df.to_csv('观察者网.csv', index=False, encoding='gb18030')

Extended example:

# -*- coding: utf-8 -*-
# Created : 2018/8/26 18:33
# author :GuoLi
 
import requests
import json
import time
from lxml import etree
import html
import re
from bs4 import BeautifulSoup
 
 
class Weibospider:
    def __init__(self):
        # Profile page of the target user. The query string of the original URL
        # was lost in formatting; '?page={}' is kept so run() can page through it.
        self.start_url = 'https://weibo.com/u/5644764907?page={}'
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
            "cache-control": "max-age=0",
            "cookie": "<paste the cookie from your own logged-in browser here>",
            "referer": "https://www.weibo.com/u/5644764907",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36",
        }
        # Proxy pool. The original used one dict with five duplicate 'HTTP' keys,
        # which Python collapses to the last entry, so a list is used instead.
        self.proxy_list = [
            {'http': 'http://180.125.70.78:9999'},
            {'http': 'http://117.90.4.230:9999'},
            {'http': 'http://111.77.196.229:9999'},
            {'http': 'http://111.177.183.57:9999'},
            {'http': 'http://123.55.98.146:9999'},
        ]
 
    def parse_home_url(self, url):  # parse the profile page (not the two segments loaded via ajax)
        res = requests.get(url, headers=self.headers)
        response = res.content.decode().replace("\\", "")
        # every_url = re.compile('target="_blank" href="(/\d+/\w+)"', re.S).findall(response)
        every_id = re.compile('name=(\d+)', re.S).findall(response)  # post ids needed for the comment pages
        home_url = []
        for id in every_id:
            # Comment API endpoint. The query string was garbled in the original;
            # this is the usual aj/v6/comment/big form with the post id filled in.
            base_url = 'https://weibo.com/aj/v6/comment/big?ajwvr=6&id={}&from=singleWeiBo'
            home_url.append(base_url.format(id))
        return home_url

    def parse_comment_info(self, url):  # parse commenters and comments (method header reconstructed; it was lost in formatting)
        res = requests.get(url, headers=self.headers)
        response = res.json()
        count = response['data']['count']  # total number of comments on the post
        html = etree.HTML(response['data']['html'])
        name = html.xpath("//div[@class='list_li S_line1 clearfix']/div[@class='WB_face W_fl']/a/img/@alt")  # commenter names
        info = html.xpath("//div[@node-type='replywrap']/div[@class='WB_text']/text()")  # comment text
        info = "".join(info).replace(" ", "").split("\n")
        info.pop(0)
        comment_time = html.xpath("//div[@class='WB_from S_txt2']/text()")  # comment timestamps
        name_url = html.xpath("//div[@class='WB_face W_fl']/a/@href")  # commenter profile urls
        name_url = ["https:" + i for i in name_url]
        comment_info_list = []
        for i in range(len(name)):
            item = {}
            item["name"] = name[i]  # the commenter's screen name
            item["comment_info"] = info[i]  # the comment text
            item["comment_time"] = comment_time[i]  # when the comment was posted
            item["comment_url"] = name_url[i]  # the commenter's profile page
            comment_info_list.append(item)
        return count, comment_info_list
 
    def write_file(self, path_name, content_list):
        for content in content_list:
            with open(path_name, "a", encoding="UTF-8") as f:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
 
    def run(self):
        # The original also built two ajax segment URLs here (the profile page loads
        # posts in three segments of 15, hence the i * 45 numbering below); those
        # lines were lost in formatting, so only the main profile page is crawled.
        for i in range(12):  # number of profile pages to walk (assumed; the loop header was garbled)
            all_url = self.parse_home_url(self.start_url.format(i + 1))
            for j in range(len(all_url)):
                path_name = "第{}条微博相关评论.txt".format(i * 45 + j + 1)
                all_count, comment_info_list = self.parse_comment_info(all_url[j])
                self.write_file(path_name, comment_info_list)
                for num in range(1, 10000):
                    if num * 15 < int(all_count) + 15:  # roughly 15 comments per page
                        comment_url = all_url[j] + "&page={}".format(num + 1)
                        print(comment_url)
                        try:
                            count, comment_info_list = self.parse_comment_info(comment_url)
                            self.write_file(path_name, comment_info_list)
                        except Exception as e:
                            print("Error:", e)
                            time.sleep(60)  # back off for a minute, then retry once
                            count, comment_info_list = self.parse_comment_info(comment_url)
                            self.write_file(path_name, comment_info_list)
                        del count
                        time.sleep(0.2)
                print("第{}条微博信息获取完成!".format(i * 45 + j + 1))
 
 
if __name__ == '__main__':
    weibo = Weibospider()
    weibo.run()
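Each post's comments end up in their own JSON-lines file; reading one back, assuming the filename format used in run():

import json

# "第1条微博相关评论.txt" is the file written for the first post (i=0, j=0)
with open("第1条微博相关评论.txt", encoding="UTF-8") as f:
    records = [json.loads(line) for line in f]
print(records[0]["name"], records[0]["comment_info"])

Note the design of the pagination check in run(): since the endpoint serves roughly 15 comments per page, num * 15 < int(all_count) + 15 keeps requesting further pages until the total count reported by the API has been covered.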