python学习笔记:python爬取github图片
总体思路
关键:每张图片有一个地址,通过找规律构建要爬取图片的地址
代码会自动将爬取到的图片保存到文件夹下:结果如下图
import json
from pathlib import Path

import requests
class ImageSpider:
    """Spider that downloads a numbered sequence of PNG images from GitHub.

    Each image lives at a predictable raw.githubusercontent.com URL
    (``1001.png`` .. ``1009.png``): the spider builds those URLs from one
    template, fetches each image, and saves it as ``imageN.png`` under
    ``save_dir``.
    """

    # Original hard-coded destination, kept as the default for compatibility.
    DEFAULT_SAVE_DIR = r'C:\Users\Ronystar\Desktop\image'

    def __init__(self, save_dir=DEFAULT_SAVE_DIR):
        """Initialize the spider.

        save_dir: folder the images are written to; it is created on demand
                  by run() (the original required it to exist beforehand).
        """
        # Browser-like User-Agent so the server treats this as a normal client.
        self.headers = {'User-Agent': "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3642.0 Mobile Safari/537.36"}
        # URL template of the images to crawl; {} is filled with 1..9.
        self.url = "https://raw.githubusercontent.com/thewintersun/tensorflowbook/master/Chapter5/5.5/data/test/1/100{}.png"
        self.save_dir = Path(save_dir)
        # Index used to name the next saved file (image1.png, image2.png, ...).
        self.ind = 1

    def get_url(self):
        """Return the list of image URLs (1001.png .. 1009.png), one per image."""
        return [self.url.format(i) for i in range(1, 10)]

    def request_data(self, url):
        """GET *url* and return the response (status code is not checked)."""
        return requests.get(url, headers=self.headers)

    def run(self):
        """Download every image and write it to ``save_dir`` as imageN.png."""
        # Create the destination folder up front; the original crashed with
        # FileNotFoundError when the folder did not already exist.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        for url in self.get_url():
            img_data = self.request_data(url)
            target = self.save_dir / 'image{}.png'.format(self.ind)
            # 'with' guarantees the file handle is closed even if write fails.
            with open(target, 'wb') as fp:
                fp.write(img_data.content)
            self.ind += 1
if __name__ == '__main__':
    # Script entry point: build the spider and download every image.
    ImageSpider().run()