创建项目
scrapy startproject tencent
編寫items.py
寫class TencentItem
import scrapy
class TencentItem(scrapy.Item):
    """Item holding one Tencent job posting scraped from hr.tencent.com."""
    # Job title
    positionname = scrapy.Field()
    # Detail-page link
    positionlink = scrapy.Field()
    # Job category
    positionType = scrapy.Field()
    # Number of openings
    peopleNum = scrapy.Field()
    # Work location
    workLocation = scrapy.Field()
    # Publish date
    publishTime = scrapy.Field()
创建基础类的爬虫
scrapy genspider tencentPosition "tencent.com"
tencentPosition.py
# -*- coding: utf-8 -*-
import scrapy
from tencent.items import TencentItem
class TencentpositionSpider(scrapy.Spider):
    """Spider for Tencent HR job listings.

    Pages through http://hr.tencent.com/position.php?&start=<offset>
    in steps of 10 rows per page, yielding one TencentItem per job row.
    """
    name = "tencent"
    allowed_domains = ["tencent.com"]
    url = "http://hr.tencent.com/position.php?&start="
    offset = 0
    start_urls = [url + str(offset)]

    def parse(self, response):
        # Job rows alternate between the 'even' and 'odd' CSS classes.
        for each in response.xpath("//tr[@class='even'] | //tr[@class='odd']"):
            # Initialize the item model for this row.
            item = TencentItem()
            # extract_first() returns None instead of raising IndexError
            # when a cell is empty (the original .extract()[0] crashed the
            # whole parse on any row with a missing field).
            item['positionname'] = each.xpath("./td[1]/a/text()").extract_first()
            # Detail-page link
            item['positionlink'] = each.xpath("./td[1]/a/@href").extract_first()
            # Job category
            item['positionType'] = each.xpath("./td[2]/text()").extract_first()
            # Number of openings
            item['peopleNum'] = each.xpath("./td[3]/text()").extract_first()
            # Work location
            item['workLocation'] = each.xpath("./td[4]/text()").extract_first()
            # Publish date
            item['publishTime'] = each.xpath("./td[5]/text()").extract_first()
            yield item

        # After finishing one page, advance the offset by 10 (one page of
        # rows), build the next page URL, and schedule it with this same
        # parse() as the callback, until the last known offset (1680).
        if self.offset < 1680:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
管道文件
pipelines.py
import json
class TencentPipeline(object):
    """Pipeline that appends each item to tencent.json, one JSON object per line."""

    def __init__(self):
        # Open in text mode with an explicit encoding. The original wrote
        # text.encode("utf-8") (bytes) into a text-mode handle, which raises
        # TypeError on Python 3.
        self.filename = open("tencent.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        # ensure_ascii=False keeps Chinese text readable in the output file.
        # The trailing ",\n" terminates each record (the original ",n" had
        # lost the backslash of the newline escape).
        text = json.dumps(dict(item), ensure_ascii=False) + ",\n"
        self.filename.write(text)
        return item

    def close_spider(self, spider):
        # Flush and release the output file when the crawl finishes.
        self.filename.close()
在settings文件设置pipelines
# Register the project pipeline; 300 is its order in the 0-1000 priority range.
ITEM_PIPELINES = {'tencent.pipelines.TencentPipeline': 300}
添加请求报头
DEFAULT_REQUEST_HEADERS
settings.py
# Scrapy project settings for the tencent crawler.
BOT_NAME = 'tencent'

SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'

# NOTE(review): if hr.tencent.com's robots.txt disallows these pages,
# ROBOTSTXT_OBEY = True will block the crawl entirely — confirm before running.
ROBOTSTXT_OBEY = True

# Throttle requests (seconds between requests) to be polite to the server.
DOWNLOAD_DELAY = 2

DEFAULT_REQUEST_HEADERS = {
    # Fixed: the original User-Agent string was missing its closing parenthesis.
    "User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; windows NT 6.1; Trident/5.0)",
    'Accept': 'text/html,Application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}

ITEM_PIPELINES = {
    'tencent.pipelines.TencentPipeline': 300,
}






