scrapy (3): 참조 요청

5264 단어
scrapy 참조 요청
1. 데이터 구조 정의: item.py 파일
'''
file: item.py
'''
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class MovieprojectItem(scrapy.Item):
    """Container for one movie scraped from www.id97.com.

    Fields filled from the listing page: post, name, _type.
    Fields filled from the detail page: director, design, actor, info.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()

    # --- listing-page fields ---
    post = scrapy.Field()
    name = scrapy.Field()
    _type = scrapy.Field()

    # --- detail-page fields ---
    director = scrapy.Field()
    design = scrapy.Field()
    actor = scrapy.Field()
    info = scrapy.Field()



2. 크롤러(스파이더) 파일
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import scrapy
from movieproject.items import MovieprojectItem


class MovieSpider(scrapy.Spider):
    """Crawl movie listings on www.id97.com and follow each detail page.

    Flow:
      (1) parse() scrapes list-page fields (poster, name, type) into an item,
          then requests the movie's detail page, passing the partially-filled
          item along via Request.meta.
      (2) parse_detail() fills in the remaining fields and yields the item.
    """
    name = 'movie'
    allowed_domains = ['www.id97.com']
    start_urls = ['http://www.id97.com/movie/']
    # Pagination template; crawls pages 1..6 in total.
    url = 'http://www.id97.com/movie/?page={}'
    page = 1

    def parse(self, response):
        """Parse one listing page: yield detail-page requests, then paginate."""
        # One div per movie card on the listing page.
        movie_div_list = response.xpath('//div[starts-with(@class,"col-xs-1-5")]')
        for odiv in movie_div_list:
            item = MovieprojectItem()
            # Poster image is lazy-loaded, so the real URL is in data-original.
            item['post'] = odiv.xpath(".//img/@data-original").extract_first()
            # Movie title.
            item['name'] = odiv.xpath("./div/div/h1/a/text()").extract_first()
            # Genre/type links (a list of strings).
            item['_type'] = odiv.xpath("./div/div/div/a/text()").extract()

            # Link to the detail page. urljoin() resolves relative hrefs
            # against the current page URL (the original passed it raw,
            # which breaks for relative links).
            detail_href = odiv.xpath('./div/a/@href').extract_first()
            # The remaining fields live on the detail page, so hand the
            # partially-filled item to parse_detail through Request.meta.
            yield scrapy.Request(url=response.urljoin(detail_href),
                                 callback=self.parse_detail,
                                 meta={'item': item})

        # Follow the next listing page (up to 6 pages total).
        if self.page <= 5:
            self.page += 1
            next_url = self.url.format(self.page)
            yield scrapy.Request(url=next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a movie detail page and yield the completed item."""
        # Retrieve the partially-filled item passed from parse().
        item = response.meta['item']
        # Director(s).
        item['director'] = response.xpath("//div[starts-with(@class,'col-xs-8')]/table/tbody/tr/td[2]/a/text()").extract()
        # Screenwriter(s) / designer(s).
        item['design'] = response.xpath("//div[starts-with(@class,'col-xs-8')]/table/tbody/tr[2]/td[2]/a/text()").extract()
        # Actor(s).
        item['actor'] = response.xpath("//div[starts-with(@class,'col-xs-8')]/table/tbody/tr[3]/td[2]/a/text()").extract()
        # Synopsis paragraph.
        item['info'] = response.xpath("//div[@class='col-xs-12 movie-introduce']/p/text()").extract_first()

        # Hand the finished item to the item pipelines.
        yield item



3. 파이프 파일
# -*- coding: utf-8 -*-
'''
file: pipelines.py
'''
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
from scrapy.utils.project import get_project_settings
import pymysql

class MovieprojectPipeline(object):
    """Write each scraped item as one JSON object per line to movie.json."""

    def open_spider(self, spider):
        # One file handle for the whole crawl; closed in close_spider().
        self.fp = open("movie.json", "w", encoding="utf8")

    def process_item(self, item, spider):
        """Serialize the item to a JSON line and pass it on unchanged."""
        obj = dict(item)
        # ensure_ascii=False keeps non-ASCII (e.g. CJK) text readable.
        string = json.dumps(obj, ensure_ascii=False)
        self.fp.write(string + '\n')
        return item

    def close_spider(self, spider):
        self.fp.close()


class MovieMysqlPipeline(object):
    """Insert each scraped item into the `movie` MySQL table."""

    def open_spider(self, spider):
        # Pull connection parameters from the project settings.
        settings = get_project_settings()
        host = settings['DB_HOST']
        port = settings['DB_PORT']
        user = settings['DB_USER']
        pwd = settings['DB_PWD']
        name = settings['DB_NAME']
        charset = settings['DB_CHARSET']
        self.conn = pymysql.connect(host=host, port=port, user=user,
                                    password=pwd, db=name, charset=charset)

    def process_item(self, item, spider):
        """Insert the item. Uses a parameterized query instead of the
        original %-interpolated SQL string, which was injection-prone and
        broke on quotes in scraped text."""
        sql = ('insert into movie(post, name, type, director, design, actor, info) '
               'values(%s, %s, %s, %s, %s, %s, %s)')
        # List-valued fields (type/director/design/actor come from extract())
        # are stringified, matching the original "%s" % list behavior.
        params = tuple(
            str(v) if isinstance(v, list) else v
            for v in (item['post'], item['name'], item['_type'],
                      item['director'], item['design'], item['actor'],
                      item['info'])
        )
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql, params)
            self.conn.commit()
        except Exception:
            # Best-effort: roll back the failed insert and keep crawling.
            self.conn.rollback()
        finally:
            cursor.close()
        return item

    def close_spider(self, spider):
        self.conn.close()

좋은 웹페이지 즐겨찾기