Scrapy framework --- grabbing Bilibili ranking chart data

1. Create the project
scrapy startproject Bilibili
cd Bilibili
scrapy genspider bilibili www.bilibili.com
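
After these two commands the generated layout looks roughly like this (begin.py is only added later, in step 6):

Bilibili/
├── scrapy.cfg            # marks the project root
└── Bilibili/
    ├── items.py          # step 2
    ├── middlewares.py
    ├── pipelines.py      # step 4
    ├── settings.py       # step 5
    └── spiders/
        └── bilibili.py   # step 3, created by genspider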

2. Item file items.py: define the data structure to scrape
import scrapy

class BilibiliItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # every field is declared with scrapy.Field(); Scrapy fields carry no type information
    rank_type = scrapy.Field()         # ranking category
    rank_no = scrapy.Field()           # rank number
    title = scrapy.Field()             # video title
    play_num = scrapy.Field()          # play count
    comment_num = scrapy.Field()       # comment count
    uploader = scrapy.Field()          # uploader name
    score = scrapy.Field()             # overall score
    set_name = scrapy.Field()          # MongoDB collection name
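
An Item instance is filled in exactly like a dict, which is how both the spider and the pipelines below use it. A quick illustration with made-up values:

from Bilibili.items import BilibiliItem

item = BilibiliItem()
item['title'] = 'some video'   # hypothetical value, only to show dict-style access
item['rank_no'] = '1'
print(dict(item))              # {'title': 'some video', 'rank_no': '1'}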

3. Spider program bilibili.py
import scrapy

from Bilibili.items import BilibiliItem

class BilibiliSpider(scrapy.Spider):
    name = 'bilibili'
    allowed_domains = ['www.bilibili.com']
    # start_urls = ['http://www.bilibili.com/']
    base_url = 'https://www.bilibili.com/ranking/'
    rank_type = {
        # url suffix, display label (the label is also used as the CSV file name)
        'all': ['all/0/0/3', 'All'],
        'origin': ['origin/0/0/3', 'Original'],
        'bangumi': ['bangumi/13/0/3', 'Bangumi'],
        'cinema': ['cinema/177/0/3', 'Cinema'],
        'rookie': ['rookie/0/0/3', 'Rookie']
    }

    # override start_requests: hand every ranking-list URL to the scheduler in turn
    def start_requests(self):
        # the class attribute has to be accessed through self
        for value in self.rank_type.values():
            url = self.base_url + value[0]
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        item = BilibiliItem()
        # derive the category key from the response URL
        url = response.url
        item['set_name'] = url.split('/')[4]
        item['rank_type'] = self.rank_type[item['set_name']][1]
        # run XPath on the response; each ranking entry is one li node
        info_list = response.xpath("//div[@class='rank-list-wrap']/ul/li")
        # iterate over every li node
        for info in info_list:
            item['rank_no'] = info.xpath("./div[@class='num']/text()")[0].extract()
            item['title'] = info.xpath(".//div[@class='info']/a/text()")[0].extract()
            # inside each li node the span texts are, in order: play count, comment count, uploader
            details = info.xpath(".//span/text()")
            item['play_num'] = details[0].extract()
            item['comment_num'] = details[1].extract()
            item['uploader'] = details[2].extract()
            item['score'] = info.xpath(".//div[2]/div/text()")[0].extract()
            yield item
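
The XPath expressions above can be checked interactively before running the whole project, using scrapy shell (assuming the ranking page is reachable and its markup has not changed):

scrapy shell https://www.bilibili.com/ranking/all/0/0/3
>>> entries = response.xpath("//div[@class='rank-list-wrap']/ul/li")
>>> len(entries)
>>> entries[0].xpath(".//div[@class='info']/a/text()").extract_first()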

4. Pipeline file pipelines.py
import pymongo, pymysql, csv

from .settings import *

class MongoDBPipeline(object):
    def __init__(self):
        self.conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
        self.db = self.conn['Bili']

    def process_item(self, item, spider):
        # spider is the spider instance that produced this item
        self.set = self.db[item['set_name']]
        d = dict(item)
        self.set.insert_one(d)
        return item
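
To confirm that documents actually arrive, a quick check with pymongo (using the same host and port as in settings.py):

import pymongo

conn = pymongo.MongoClient('192.168.0.106', 27017)
db = conn['Bili']
print(db.list_collection_names())   # expected: one collection per ranking key, e.g. 'all', 'origin', ...
print(db['all'].find_one())         # a single scraped ranking entry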


class CSVPipeline(object):
    # write each ranking category to its own CSV file, named after rank_type
    def process_item(self, item, spider):
        # append to the CSV file; encoding is gb18030 (plain utf-8 tends to show garbled Chinese in Excel)
        with open(item['rank_type']+".csv", 'a', newline='', encoding='gb18030') as f:
            writer = csv.writer(f)
            writer.writerow(
                [item['rank_no'],item['title'],
                 item['play_num'],item['comment_num'],
                 item['uploader'],item['score']
                 ])
        return item
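
The pipeline above only appends data rows. If a header row is wanted, one option is to write it the first time each file is created; a minimal sketch, with assumed column names:

import csv, os

def write_row(filename, row, header=('rank', 'title', 'plays', 'comments', 'uploader', 'score')):
    first_write = not os.path.exists(filename)
    with open(filename, 'a', newline='', encoding='gb18030') as f:
        writer = csv.writer(f)
        if first_write:
            writer.writerow(header)   # header only when the file is newly created
        writer.writerow(row)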


class MysqlPipeline(object):
    def __init__(self):
        self.db = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD,
                                  database=MYSQL_DB, charset='utf8')
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):
        # the %s placeholders in the SQL are filled from the list passed to execute
        sql = 'insert into test values(%s,%s,%s,%s,%s,%s)'
        self.cursor.execute(sql,[
                                    item['rank_no'], item['title'],item['play_num'],
                                    item['comment_num'],item['uploader'],item['score']
                                ])
        self.db.commit()
        return item

    # called once when the spider closes; release the cursor and the connection
    def close_spider(self,spider):
        self.cursor.close()
        self.db.close()
        print('MySQL connection closed')
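
The INSERT statement above assumes a table named test with six columns already exists in the Scrapy database. A minimal sketch of creating it (column names and lengths are assumptions, not taken from the original project):

import pymysql

db = pymysql.connect(host='192.168.0.106', user='root', password='123456',
                     database='Scrapy', charset='utf8')
cursor = db.cursor()
cursor.execute("""
    CREATE TABLE IF NOT EXISTS test (
        rank_no     VARCHAR(20),
        title       VARCHAR(255),
        play_num    VARCHAR(50),
        comment_num VARCHAR(50),
        uploader    VARCHAR(100),
        score       VARCHAR(50)
    ) DEFAULT CHARSET=utf8
""")
db.commit()
db.close()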

5. Modify settings.py
# do not obey robots.txt
ROBOTSTXT_OBEY = False

# default request headers
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

# enable the item pipelines (a lower number means the pipeline runs earlier)
ITEM_PIPELINES = {
    'Bilibili.pipelines.MongoDBPipeline': 300,
    'Bilibili.pipelines.CSVPipeline': 200,
    'Bilibili.pipelines.MysqlPipeline': 100,
}

# database connection settings
MONGO_HOST = '192.168.0.106'
MONGO_PORT = 27017

MYSQL_HOST = '192.168.0.106'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123456'
MYSQL_DB = 'Scrapy'

# log level and log file location
LOG_LEVEL = 'WARNING'
LOG_FILE = 'spider.log'

6. Run the project: create begin.py (in the same directory as scrapy.cfg)
from scrapy import cmdline

cmdline.execute('scrapy crawl bilibili'.split())
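
Running python begin.py from the project root is equivalent to typing scrapy crawl bilibili on the command line; with the settings above, messages at WARNING level and higher are written to spider.log.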
