Scraping Weibo user profile information (bio, etc.) with Python

The Scrapy spider below pages through the m.weibo.cn container API for a user's fan list and copies each fan's profile fields into a WeibouserItem.
import json

from scrapy import Spider, Request

from weibouser.items import WeibouserItem


class WeiboSpider(Spider):
    name = 'weibo'
    allowed_domains = ['weibo.cn']

    def start_requests(self):
        # Page through the fans list of user 2619766381 via the m.weibo.cn container API.
        for i in range(300):
            yield Request(
                url="https://m.weibo.cn/api/container/getIndex?containerid=231051_-_fans_-_2619766381&luicode=10000011&lfid=1076032619766381&featurecode=20000320&since_id=" + str(i),
                callback=self.parse
            )

    def parse(self, response):
        # Extract the user cards from the JSON response.
        result = json.loads(response.body.decode("utf-8"))
        cards = result["data"]["cards"][0]["card_group"]
        for data in cards:
            user = data.get("user", {})
            if not user:
                continue
            # For every field declared on the item, copy the value over if the
            # user dict contains it -- a quick way to fill all declared fields.
            item = WeibouserItem()
            for field in item.fields:
                if field in user:
                    item[field] = user[field]
            yield item
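
The spider imports WeibouserItem from weibouser.items, but the item class itself is not shown above. A minimal sketch is given below; the field names (id, screen_name, description and so on) are assumptions based on the keys commonly found in the m.weibo.cn "user" object, not taken from the original project, so adjust them to whatever you actually want to keep.

# weibouser/items.py -- hypothetical sketch; each attribute must match a key
# in the API's "user" dict for the copy loop in parse() to pick it up.
import scrapy


class WeibouserItem(scrapy.Item):
    id = scrapy.Field()
    screen_name = scrapy.Field()
    description = scrapy.Field()       # the user's profile bio
    gender = scrapy.Field()
    followers_count = scrapy.Field()
    follow_count = scrapy.Field()
    profile_url = scrapy.Field()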
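
m.weibo.cn tends to reject requests that do not look like they come from a mobile browser, so a couple of project settings usually need adjusting. The snippet below is only a sketch with example values (these are standard Scrapy settings, but the User-Agent string and delay are assumptions, not part of the original post):

# weibouser/settings.py -- relevant excerpts (sketch)
BOT_NAME = "weibouser"
ROBOTSTXT_OBEY = False    # the API endpoint is not meant for regular browsing
DOWNLOAD_DELAY = 1        # be gentle; Weibo rate-limits aggressive clients
USER_AGENT = ("Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) "
              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148")

With that in place, run the spider with scrapy crawl weibo -o users.json to dump the yielded items to a JSON file.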