#coding:utf-8
'''
Python crawler: fetch pages from a target site through a proxy server
and store the scraped data to text files.
@date 20180330
@author: lml
'''
import requests
from lxml import html
import sys
import os
import time
webpage = "http://www.youde.net"
curdate = time.strftime("%Y%m%d", time.localtime())  # current date, e.g. 20180330
url = webpage + '/Svante/channels/825.html'          # channel page that lists the articles
curproxy = ''
# Use a proxy server; the proxy addresses are stored in a text file
with open("proxy/proxylist.txt", "r") as foProxy:
    proxyArray = foProxy.readlines()  # read the file into a list of lines
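# Assumption (not stated in the original): proxylist.txt holds one proxy per line,
# in a form requests understands, e.g. "http://123.45.67.89:8080".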
# Iterate over the proxies and keep the first one that responds
for proxyitem in proxyArray:
    curproxy = proxyitem.replace('\n', '')
    print(curproxy)
    try:
        # Probe the proxy by requesting a test page through it
        page = requests.Session().get('http://ip.chinaz.com/',
                                      proxies={'http': curproxy, 'https': curproxy},
                                      timeout=10)
        print('Proxy is usable: ' + curproxy)
        break
    except Exception as e:
        print('Current proxy is not usable')
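# A minimal sketch (assumption, not part of the original flow): the working proxy found
# above could be reused for the crawl by attaching it to a shared Session, e.g.:
#
#     session = requests.Session()
#     session.proxies = {'http': curproxy, 'https': curproxy}
#     page = session.get(url)
#
# The crawl below requests the pages directly, without the proxy.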
# Fetch the channel page that lists the article links
page = requests.Session().get(url)
# The page is UTF-8, but requests may fall back to latin-1 when no charset is declared,
# so re-encode/decode to recover the Chinese text
tree = html.fromstring(page.text.encode('latin1').decode('utf-8'))
#result = tree.xpath('//ul[@class="list-unstyled"]//li//a/div/span/text()')
urlArray = tree.xpath('//ul[@class="list-unstyled"]//li//a/@href')  # all article links on the list page
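# Assumption based on how the links are used below: each href is a site-relative path
# of the form '/Svante/contents/825/<id>.html', so it is joined with `webpage` and the
# <id> part is reused as the output file name.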
# Create the output directory for the current date
if os.path.exists('AutoGenHtml/' + curdate):
    # the existing directory could be removed first here:
    #os.rmdir('AutoGenHtml/'+curdate)
    print('Directory already exists')
else:
    # create the directory (makedirs also creates the parent AutoGenHtml folder if missing)
    os.makedirs("AutoGenHtml/" + curdate)
# Iterate over the collected links and request each article page
for item in urlArray:
    s_url = webpage + item
    #print(s_url)
    c_filename = item.replace('/Svante/contents/825/', '').replace('.html', '')  # article id used as file name
    #print(c_filename)
    s_page = requests.Session().get(s_url)
    # same encoding fix as for the list page
    s_tree = html.fromstring(s_page.text.encode('latin1').decode('utf-8'))
    s_title = s_tree.xpath('//div[@class="g-pl-20--lg"]//h2//text()')            # title
    s_itemno = s_tree.xpath('//div[@class="g-pl-20--lg"]//span[1]//div/text()')  # item number
    s_time = s_tree.xpath('//div[@class="g-pl-20--lg"]//span[2]//div/text()')    # publish time
    s_content = s_tree.xpath('//div[@class="g-pl-20--lg"]//span[3]//text()')     # body text
    # join the body text fragments into one string
    htmls = ''
    for sitem in s_content:
        htmls += sitem + '\n'
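    # Assumption: some article pages may lack one of the fields, in which case the
    # xpath lists above would be empty and the [0] lookups below would raise IndexError.
    # A minimal guard could be, for example:
    #
    #     s_title = s_title or ['']
    #     s_itemno = s_itemno or ['']
    #     s_time = s_time or ['']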
    # Write the result to a UTF-8 text file named after the article id
    with open('AutoGenHtml/' + curdate + '/' + c_filename + ".txt", "w", encoding='utf8') as fo:
        fo.write('标题:' + s_title[0] + '\n')      # title
        fo.write('项目编号:' + s_itemno[0] + '\n')  # item number
        fo.write('发布时间:' + s_time[0] + '\n')    # publish time
        fo.write('正文:' + htmls + ',' + '\n')      # body text
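# For reference: the script produces one text file per article under
# AutoGenHtml/<YYYYMMDD>/<id>.txt containing the title, item number,
# publish time and body text extracted above.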