selenium简单爬虫实例
初学selenium,记录一下,送给正在学习的人,少走些弯路,下面先推荐经典文章。
selenium:
- https://blog.csdn.net/Eastmount/article/details/48108259
- https://zhuanlan.zhihu.com/p/36268930
- https://www.cnblogs.com/yufeihlf/p/5717291.html#test10
- https://www.google.com.hk/
正则表达式:
- https://deerchao.net/tutorials/regex/regex.htm
- http://www.runoob.com/python3/python3-reg-expressions.html
- http://tool.oschina.net/uploads/apidocs/jquery/regexp.html
Xpath:
实例:爬取某求职网站的信息
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import re
from time import sleep
import pandas as pd
import openpyxl
# Search keyword typed into the job site's query box.
profession = "Python"
# Target site URL (redacted in the original post) — fill in a real URL before running.
url = *********************
def enter(driver, url):
    """Navigate the given webdriver to *url*."""
    driver.get(url)
def find(driver, name, sums=300):
    """Search the job board for *name* and scrape up to *sums* postings.

    Parameters
    ----------
    driver : selenium webdriver already on the site's listing page.
    name : search keyword typed into the 'query' input box.
    sums : maximum number of job rows to collect (default 300; generalized
        from the original hard-coded limit, so existing callers are unchanged).

    Returns
    -------
    list of [title, salary, city, experience, education, company name,
    company type, link] rows, one per posting.
    """
    # Submit the search query.
    query_box = driver.find_element_by_name('query')
    query_box.clear()
    query_box.send_keys(name)
    driver.find_element_by_xpath(r'//*[@id="wrap"]/div[4]/div/div/div[1]/form/button').click()

    items = driver.find_elements_by_class_name('job-primary')
    lists = []
    count = 0
    while count < sums:
        for item in items:
            link = item.find_element_by_tag_name('a').get_attribute('href')
            title = item.find_element_by_class_name('job-title').text
            emolument = item.find_element_by_class_name('red').text
            # The <p> element packs city / experience / education into one
            # innerHTML string separated by <em> tags; fetch it once and
            # parse all three fields from the same string.
            info = item.find_element_by_tag_name('p').get_attribute('innerHTML')
            city = info.split('<')[0]
            # Guard against a missing match instead of crashing on None[0].
            match = re.search(r'(?<=em>).*\w+(?=<em)', info)
            work_experience = match[0] if match else ''
            education_background = info.split('>')[-1]
            company = item.find_element_by_class_name('company-text')
            companyname = company.text.split('\n')[0]
            companytype = company.find_element_by_tag_name('p').get_attribute('innerHTML').split('<')[0]

            good = [title, emolument, city, work_experience,
                    education_background, companyname, companytype, link]
            lists.append(good)
            print(good)
            count += 1
            if count == sums:
                break
        if count == sums:
            break
        # Advance to the next results page and re-collect the rows.
        driver.find_element_by_class_name('next').click()
        sleep(1)
        items = driver.find_elements_by_class_name('job-primary')
    return lists
def main():
    """Open Chrome, scrape job listings for the module-level keyword,
    and save the result to an Excel file."""
    driver = webdriver.Chrome()
    driver.maximize_window()
    enter(driver, url)
    sleep(1)
    allInfo = find(driver, profession)
    # Build the frame in a single constructor call instead of appending
    # row-by-row: `data.loc[len(data)] = ...` reallocates on every insert
    # and is O(n^2) overall.  Each row already has exactly 8 fields in
    # column order, so this produces the identical DataFrame.
    data = pd.DataFrame(
        allInfo,
        columns=['职业', '工资', '城市', '工作经验', '学历',
                 '公司名称', '公司类型', '链接'],
    )
    data.to_excel("职位信息.xlsx")
    driver.quit()


if __name__ == "__main__":
    main()
成果附图: