私のコードでは、102件のリンクのうち44件しか取得できません。なぜ一部しか抽出されないのか、どなたか教えていただけませんか？ウェブサイトからすべてのデータをスクレイピングするにはどうすればよいでしょうか？ご助力に感謝します。
import scrapy
class ProjectItem(scrapy.Item):
    """Container for the attributes scraped from one Deloitte-100
    company detail page on cincinnati.com.

    Every attribute is a plain ``scrapy.Field``; values are filled in by
    ``ProjectSpider.parse_company_detail``.
    """

    # Fix: the original class body was not indented, which is a syntax
    # error in Python.  Field names are kept exactly as before so any
    # pipeline/exporter keyed on them keeps working.
    title = scrapy.Field()          # company name (text after the dash in p[1])
    owned = scrapy.Field()          # "Family owned" paragraph text
    Revenue2014 = scrapy.Field()    # "$..." revenue figure for 2014
    Revenue2015 = scrapy.Field()    # "$..." revenue figure for 2015
    Website = scrapy.Field()        # href of the "www." anchor
    Rank = scrapy.Field()           # paragraph containing "rank"
    Employees = scrapy.Field()      # paragraph containing "Employ"
    headquarters = scrapy.Field()   # list of text nodes from p[10]
    FoundedYear = scrapy.Field()    # paragraph(s) containing "founded"
class ProjectSpider(scrapy.Spider):
    """Crawl the Cincinnati.com Deloitte-100 article, follow the link to
    every company detail page, and yield one ``ProjectItem`` per company.

    Fixes relative to the original:
      * the ``class`` keyword had been machine-translated to ``クラス``
        (a syntax error) and the whole body had lost its indentation;
      * every ``extract_first()`` result is now guarded before string
        slicing, so a detail page missing one node yields a partially
        filled item instead of raising ``AttributeError`` on ``None``.
    """

    name = "cin100"
    allowed_domains = ['cincinnati.com']
    start_urls = ['http://www.cincinnati.com/story/money/2016/11/26/see-which-companies-16-deloitte-100/94441104/']

    def parse(self, response):
        """Yield one request per company link found on the index page."""
        # All company links are anchors inside the <p> siblings that
        # follow the paragraph containing "click or tap here."
        sel_companies = response.xpath(
            '//p[contains(.,"click or tap here.")]/following-sibling::p/a')
        # NOTE(review): if fewer detail pages come back than links exist
        # (e.g. 44 of 102), check for repeated hrefs — Scrapy's duplicate
        # filter silently drops requests to URLs it has already seen.
        # Pass dont_filter=True to Request to verify.  TODO confirm.
        for sel_company in sel_companies:
            href = sel_company.xpath('./@href').extract_first()
            if not href:
                continue  # anchor without an href — nothing to follow
            url = response.urljoin(href)
            yield scrapy.Request(url, callback=self.parse_company_detail)

    @staticmethod
    def _after_dollar(text):
        """Return '$' + the substring after the first '$' in *text*,
        or ``None`` when *text* is missing or has no dollar sign."""
        if text and '$' in text:
            return '$' + text.rsplit('$')[1]
        return None

    def parse_company_detail(self, response):
        """Extract one company's attributes from its detail page."""
        item = ProjectItem()
        main = '//div[@role="main"]/'

        # Title is the first paragraph, formatted like "Rank - Name";
        # keep the part after the dash (falls back to the raw text when
        # no dash is present, rather than crashing).
        title = response.xpath(main + 'p[1]//text()').extract_first()
        item['title'] = title.rsplit('-')[1] if title and '-' in title else title

        # Family ownership has a label we can select on directly.
        item['owned'] = response.xpath(
            main + 'p[contains(.,"Family owned")]/text()').extract_first()

        item['Revenue2014'] = self._after_dollar(response.xpath(
            main + 'p[contains(.,"2014")]/text()').extract_first())
        item['Revenue2015'] = self._after_dollar(response.xpath(
            main + 'p[contains(.,"$")]/text()').extract_first())

        item['Website'] = response.xpath(
            main + 'p/a[contains(.,"www.")]/@href').extract_first()
        item['Rank'] = response.xpath(
            main + 'p[contains(.,"rank")]/text()').extract_first()
        item['Employees'] = response.xpath(
            main + 'p[contains(.,"Employ")]/text()').extract_first()
        # Headquarters may span several text nodes; keep the full list.
        item['headquarters'] = response.xpath(main + 'p[10]//text()').extract()
        item['FoundedYear'] = response.xpath(
            main + 'p[contains(.,"founded")]/text()').extract()

        yield item