python爬虫网站代码嵌套多层

418次阅读
没有评论
python爬虫网站代码嵌套多层

# -*- coding: utf-8 -*-
"""Scrapy spider for the 2017 National S&T Award bulletin on nosta.gov.cn.

Three-level crawl:
  1. ``parse``      -- index page: one link per award group
  2. ``parse_url``  -- group page: one link per nominated project
  3. ``parse_item`` -- project detail page: yields one populated NostaItem
"""
import hashlib
import time

import scrapy

from nosta.items import NostaItem


class NostaSpider(scrapy.Spider):
    name = "nosta"
    allowed_domains = ["nosta.gov.cn"]
    start_urls = [
        "http://www.nosta.gov.cn/upload/2017slgb/showProject.html",
    ]

    def parse(self, response):
        """Level 1: for every link on the index page, collect the group's
        name, ordinal number and section heading, then request the group
        page, forwarding that metadata via ``meta``."""
        for href in response.xpath('//a/@href').extract():
            # Anchor text is the group name.
            group_name = response.xpath(
                '//a[@href="%s"]/text()' % (href)).extract()[0]
            # The cell preceding the link carries the group's ordinal number.
            group_number = response.xpath(
                '//a[@href="%s"]/parent::*/preceding-sibling::*/text()'
                % (href)).extract()[0]
            # The heading four ancestors up names the directory/section.
            directory_name = response.xpath(
                '//a[@href="%s"]/parent::*/parent::*/parent::*/parent::*'
                '/preceding-sibling::*/text()' % (href)).extract()[0]
            group_url = response.urljoin(href)
            yield scrapy.Request(
                url=group_url,
                meta={
                    "group_name": group_name,
                    "group_number": group_number,
                    "directory_name": directory_name,
                    "group_url": group_url,
                },
                callback=self.parse_url,
                dont_filter=True,
            )

    def parse_url(self, response):
        """Level 2: on a group page, collect each project's number, name and
        URL, then request the detail page, forwarding the accumulated
        group- and project-level metadata."""
        group_name = response.meta["group_name"]
        group_number = response.meta["group_number"]
        directory_name = response.meta["directory_name"]
        group_url = response.meta["group_url"]
        for href in response.xpath('//a/@href').extract():
            # Ordinal number in the cell preceding the project link.
            project_number = response.xpath(
                '//a[@href="%s"]/parent::*/preceding-sibling::*/text()'
                % (href)).extract()[0]
            project_url = response.urljoin(href)
            # Anchor text is the project name.
            project_name = response.xpath(
                '//a[@href="%s"]/text()' % (href)).extract()[0]
            yield scrapy.Request(
                url=project_url,
                meta={
                    "group_name": group_name,
                    "group_number": group_number,
                    "directory_name": directory_name,
                    "group_url": group_url,
                    "project_number": project_number,
                    "project_url": project_url,
                    "project_name": project_name,
                },
                callback=self.parse_item,
                dont_filter=True,
            )

    def parse_item(self, response):
        """Level 3: scrape one project detail page into a ``NostaItem``.

        Yields an item carrying the metadata gathered at levels 1-2 plus the
        raw page HTML, attachment/image URLs with their pipeline paths, and a
        label -> value mapping of the detail table.
        """
        item = NostaItem()
        item["time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        item["year"] = ["2017"]
        item["group_name"] = response.meta["group_name"]
        item["group_number"] = response.meta["group_number"]
        item["directory_name"] = response.meta["directory_name"]
        item["group_url"] = response.meta["group_url"]
        item["project_number"] = response.meta["project_number"]
        item["project_url"] = response.meta["project_url"]
        item["project_name"] = response.meta["project_name"]
        # Raw HTML of the detail page.
        item["project_html"] = response.body
        # PDF links for the "collaborator relationship" attachment.
        s1 = u'完成人合作关系说明:'
        item["file_urls"] = [
            'http://www.nosta.gov.cn/upload/2017slgb' + i.replace('..', '')
            for i in response.xpath(
                "//td[text() = '%s']/following-sibling::*/a/@href" % (s1)
            ).extract()
        ]
        # BUG FIX: the original reused one sha1 object and update()d it per
        # URL, so each digest accumulated every previous URL and depended on
        # iteration order.  Hash each URL independently instead (this is also
        # what Scrapy's FilesPipeline does).  encode() keeps it Python-3 safe.
        item["files"] = [
            {"url": u,
             "path": hashlib.sha1(u.encode('utf-8')).hexdigest() + ".pdf"}
            for u in item["file_urls"]
        ]
        # All full-width (840px) images on the page.
        item["image_urls"] = [
            'http://www.nosta.gov.cn/upload/2017slgb' + i.replace('..', '')
            for i in response.xpath('//img[@width="840px"]/@src').extract()
        ]
        # Same per-URL hashing fix as for the PDF attachments above.
        item["images"] = [
            {"url": u,
             "path": hashlib.sha1(u.encode('utf-8')).hexdigest() + ".jpg"}
            for u in item["image_urls"]
        ]
        # Label -> value map of the detail table.  When a label's sibling
        # cell has no text (e.g. it only contains images), fall back to the
        # image URLs inside it.  extract_first(default='') avoids the
        # IndexError the original extract()[0] raised on an empty cell.
        dict3 = {}
        for label in response.xpath('//td[@class="label"]/text()').extract():
            value = response.xpath(
                "//td[text() = '%s']/following-sibling::*" % (label)
            ).xpath('string(.)').extract_first(default='')
            if not value:
                value = [
                    'http://www.nosta.gov.cn/upload/2017slgb'
                    + i.replace('..', '')
                    for i in response.xpath(
                        "//td[text() = '%s']/following-sibling::*/img/@src"
                        % (label)
                    ).extract()
                ]
            dict3[label] = value
        item["project_content"] = dict3
        yield item

神龙|纯净稳定代理IP免费测试>>>>>>>>天启|企业级代理IP免费测试>>>>>>>>IPIPGO|全球住宅代理IP免费测试

相关文章:

版权声明:Python教程2022-10-25发表,共计3731字。
新手QQ群:570568346,欢迎进群讨论 Python51学习