
Python Scrapy Crawler Example: Douban

局外人LZ

Preface:

This post walks through a complete Scrapy project that crawls Douban's Top 250 movie list: creating the project, writing the spider, adding a downloader middleware that rotates proxies and User-Agents, and storing the results in MySQL and MongoDB through item pipelines.

The Scrapy crawler framework in Python

Create the project: scrapy startproject scrapySpider
Enter the project directory: cd .\scrapySpider\
Generate the spider: scrapy genspider douban movie.douban.com
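For reference, scrapy genspider produces a spider stub roughly like the one below (exact quoting and the placeholder start URL vary slightly between Scrapy versions); the full douban.py shown later replaces this stub.

import scrapy


class DoubanSpider(scrapy.Spider):
    name = "douban"
    allowed_domains = ["movie.douban.com"]
    start_urls = ["https://movie.douban.com"]

    def parse(self, response):
        # the generated stub does nothing until parsing logic is added
        pass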

Directory structure:

|-- scrapySpider              project root
|   |-- scrapySpider          project package
|   |   |-- spiders           spider directory
|   |   |   |-- douban.py     the spider
|   |   |-- items.py          data models, similar to a database table schema
|   |   |-- middlewares.py    middleware for inspecting and modifying requests and responses
|   |   |-- pipelines.py      item pipelines that process the scraped data
|   |   |-- settings.py       configuration options and parameters for the Scrapy project
|   |-- scrapy.cfg            framework config file describing the project layout and metadata
Create the Kuaidaili proxy helper scrapySpider/kuaidaili.py:
import requests


class Kuaidaili():
    request_url = {
        # endpoint for fetching the proxy IP signature (fill in the Kuaidaili API URL here)
        'getIpSignature': '',
        # endpoint for fetching the proxy IP; '{}' is replaced with the secret token (fill in the Kuaidaili API URL here)
        'getIp': '{}'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
    }
    ip_use = 'purchased proxy username'
    ip_password = 'purchased proxy password'

    def __init__(self):
        '''Create a requests session object'''
        self.request_session = requests.Session()
        self.request_session.headers.update(self.headers)

    # Get the proxy IP signature
    @classmethod
    def get_ip_url(cls):
        par = {
            'secret_id': 'oy2q5xu76k4s8olx59et',
            'secret_key': '5xg6gvouc0vszfw0kxs1a8vrw1r6ity7'
        }
        response = requests.post(cls.request_url['getIpSignature'], data=par)
        response_data = response.json()
        return cls.request_url['getIp'].format(response_data['data']['secret_token'])

    @classmethod
    def get_ip(cls):
        url = cls.get_ip_url()
        response = requests.get(url)
        return f'http://{cls.ip_use}:{cls.ip_password}@{response.text}/'


if __name__ == '__main__':
    kuaidaili = Kuaidaili()
    print(kuaidaili.get_ip())
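Before wiring the proxy into Scrapy, it can be sanity-checked on its own. The snippet below is a minimal sketch, assuming the Kuaidaili endpoints above have been filled in; httpbin.org is used purely as an illustrative echo service and is not part of the original code.

import requests

from scrapySpider.kuaidaili import Kuaidaili

# Ask Kuaidaili for a proxy string of the form http://user:password@ip:port/
proxy = Kuaidaili.get_ip()

# Route a test request through the proxy; the echoed origin IP should be the
# proxy's exit IP rather than the local machine's address.
resp = requests.get('https://httpbin.org/ip',
                    proxies={'http': proxy, 'https': proxy},
                    timeout=5)
print(resp.text)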
The Douban spider douban.py:
import scrapy
from scrapy import cmdline
from scrapy.http import HtmlResponse, Request

from scrapySpider.items import DoubanItem


class DoubanSpider(scrapy.Spider):
    name = 'douban'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response: HtmlResponse, **kwargs):
        video_list = response.xpath('//ol[@class="grid_view"]/li')
        for li in video_list:
            item = DoubanItem()
            item['title'] = li.xpath('.//div[@class="hd"]/a/span[1]/text()').extract_first()
            item['rating'] = li.xpath('.//div[@class="bd"]//span[@class="rating_num"]/text()').extract_first()
            item['quote'] = li.xpath('.//div[@class="bd"]//p[@class="quote"]/span/text()').extract_first()
            detail_url = li.xpath('.//div[@class="hd"]/a/@href').extract_first()
            yield Request(url=detail_url, callback=self.get_detail_info, meta={'item': item})
        # Fetch the next page
        next_page_url = response.xpath('//div[@class="paginator"]//link[@rel="next"]/@href').extract_first()
        if next_page_url:
            yield Request(url=response.urljoin(next_page_url), callback=self.parse)

    # Alternatively, override start_requests to fetch multiple pages directly
    # def start_requests(self):
    #     for i in range(0, 2):
    #         yield Request(url=f'{self.start_urls[0]}?start={i*25}&filter=', dont_filter=True, callback=self.parse)

    def get_detail_info(self, response: HtmlResponse):
        item = response.meta['item']
        detail = response.xpath('//span[@class="all hidden"]/text()').extract_first()
        if not detail:
            detail = response.xpath('//div[@id="link-report-intra"]/span[1]/text()').extract_first()
        item['intro'] = detail.strip()
        yield item


if __name__ == '__main__':
    cmdline.execute('scrapy crawl douban'.split())
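The if __name__ == '__main__' block above starts the crawl through cmdline.execute. An equivalent programmatic entry point is a sketch like the following, using Scrapy's public CrawlerProcess API; it loads the same project settings, so the pipelines and middleware below still apply.

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from scrapySpider.spiders.douban import DoubanSpider

# Load the project settings (pipelines, middlewares, etc.) and run the spider
process = CrawlerProcess(get_project_settings())
process.crawl(DoubanSpider)
process.start()  # blocks until the crawl is finished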
settings.py
# Scrapy settings for scrapySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "scrapySpider"

SPIDER_MODULES = ["scrapySpider.spiders"]
NEWSPIDER_MODULE = "scrapySpider.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
   "Accept-Language": "en",
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "scrapySpider.middlewares.ScrapyspiderSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   "scrapySpider.middlewares.DoubanDownloaderMiddleware": 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapeops_scrapy.extension.ScrapeOpsMonitor': 500,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   "scrapySpider.pipelines.MysqlPipeLine": 300,
   "scrapySpider.pipelines.MongoPipeLine": 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"

# Logging configuration
# LOG_FILE = 'log.log'
# LOG_FILE_APPEND = False
# LOG_LEVEL = 'INFO'
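Everything in settings.py applies project-wide. If one spider needs different values, Scrapy also allows per-spider overrides through the custom_settings class attribute; the sketch below is purely illustrative (the spider name and values are not part of this project).

import scrapy


class SlowSpider(scrapy.Spider):
    name = 'slow_example'  # illustrative spider, not part of this project
    custom_settings = {
        'DOWNLOAD_DELAY': 1,   # overrides the project-wide value for this spider only
        'LOG_LEVEL': 'INFO',
    }

    def parse(self, response):
        pass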
items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DoubanItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    rating = scrapy.Field()
    quote = scrapy.Field()
    intro = scrapy.Field()
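A DoubanItem behaves like a dictionary restricted to the declared fields, which is what lets the pipelines below call dict(item) before writing to MySQL or MongoDB. A small sketch (the field values are made up):

from scrapySpider.items import DoubanItem

item = DoubanItem(title='Example title', rating='9.0')
item['quote'] = 'Example quote'
print(dict(item))  # {'title': 'Example title', 'rating': '9.0', 'quote': 'Example quote'}

# Assigning to an undeclared field raises KeyError, which catches typos early:
# item['director'] = 'someone'   # KeyError: DoubanItem does not support field: director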
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
import pymongo

video_spider = ['douban']


class DoubanPipeline:
    def process_item(self, item, spider):
        print(item)
        return item


class MysqlPipeLine:

    def open_spider(self, spider):
        self.spider = spider
        self.mysql = pymysql.connect(host='localhost', port=3306, user='root', password='root')
        self.cursor = self.mysql.cursor()
        # Create the video database and the spider's table
        if self.spider.name in video_spider:
            self.create_db('video')

    '''Create the database'''
    def create_db(self, db_name):
        sql = f'''CREATE DATABASE IF NOT EXISTS {db_name}'''
        try:
            self.cursor.execute(sql)
            self.mysql.select_db(db_name)
            if self.spider.name == 'douban':
                self.create_douban_table()
        except Exception as e:
            print(f'Failed to create database {db_name}: {e}')

    '''Create the douban table'''
    def create_douban_table(self):
        sql = f'''
        CREATE TABLE IF NOT EXISTS {self.spider.name}(
        id INT AUTO_INCREMENT,
        title VARCHAR(255),
        rating FLOAT,
        quote VARCHAR(255),
        intro TEXT,
        PRIMARY KEY(id)
        )
        '''
        try:
            self.cursor.execute(sql)
        except Exception as e:
            print(f'Failed to create table douban: {e}')

    def process_item(self, item, spider):
        if spider.name == 'douban':
            sql = f'''INSERT INTO {spider.name}(title,rating,quote,intro) VALUES(%(title)s,%(rating)s,%(quote)s,%(intro)s)'''
            try:
                item['rating'] = float(item['rating'])
                self.cursor.execute(sql, dict(item))
                self.mysql.commit()
            except Exception as e:
                print(f'Failed to insert "{item["title"]}": {e}')
                self.mysql.rollback()
        return item

    def close_spider(self, spider):
        self.mysql.close()


class MongoPipeLine:

    def open_spider(self, spider):
        self.spider = spider
        self.mongo = pymongo.MongoClient(host='localhost', port=27017)
        # Use the video database and the spider's collection
        if self.spider.name in video_spider:
            self.cursor = self.mongo['video'][self.spider.name]

    def process_item(self, item, spider):
        try:
            self.cursor.insert_one(dict(item))
        except Exception as e:
            print(f'Failed to insert "{item["title"]}": {e}')
        return item

    def close_spider(self, spider):
        self.mongo.close()
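After a crawl, it is easy to confirm that both stores received data. The check below is only a sketch reusing the same connection parameters as the pipelines; the database name 'video' and table/collection name 'douban' come from the code above.

import pymysql
import pymongo

# Count rows written by MysqlPipeLine
mysql = pymysql.connect(host='localhost', port=3306, user='root',
                        password='root', database='video')
with mysql.cursor() as cursor:
    cursor.execute('SELECT COUNT(*) FROM douban')
    print('MySQL rows:', cursor.fetchone()[0])
mysql.close()

# Count documents written by MongoPipeLine
mongo = pymongo.MongoClient(host='localhost', port=27017)
print('MongoDB documents:', mongo['video']['douban'].count_documents({}))
mongo.close()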
middlewares.py
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from fake_useragent import UserAgent
from scrapy.http import Request, HtmlResponse

from scrapySpider.kuaidaili import Kuaidaili

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class DoubanDownloaderMiddleware:

    def __init__(self):
        self.ua = UserAgent()
        self.kuaidaili = Kuaidaili()
        # Fetch an initial proxy IP
        self.first_ip = self.kuaidaili.get_ip()

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request: Request, spider):
        # Set a random User-Agent
        request.headers['User-Agent'] = self.ua.random
        # Set the proxy
        request.meta['proxy'] = self.first_ip
        request.meta['download_timeout'] = 5
        spider.logger.info(f'ip:{request.meta["proxy"]}')
        return None

    def process_response(self, request, response: HtmlResponse, spider):
        spider.logger.info(f'ip:{request.meta["proxy"]}')
        if response.status == 200:
            return response
        # The proxy has gone stale: fetch a new one and return the request so it is retried
        request.meta['proxy'] = self.kuaidaili.get_ip()
        request.meta['download_timeout'] = 2
        return request

    def spider_opened(self, spider):
        spider.logger.info(f'"{spider.name}" spider opened')
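One thing to watch in process_response above: if the site keeps rejecting requests, swapping in a fresh proxy and returning the request can retry indefinitely. The variation below is only an illustrative sketch, not part of the original middleware; the proxy_retries meta key, the CappedRetryMiddleware name, and the limit of 3 are all assumptions, and it would be registered in DOWNLOADER_MIDDLEWARES in place of the original class.

from scrapy.http import HtmlResponse, Request

from scrapySpider.middlewares import DoubanDownloaderMiddleware


class CappedRetryMiddleware(DoubanDownloaderMiddleware):
    """Illustrative subclass that stops swapping proxies after a few failures."""

    MAX_PROXY_RETRIES = 3  # illustrative limit

    def process_response(self, request: Request, response: HtmlResponse, spider):
        if response.status == 200:
            return response
        retries = request.meta.get('proxy_retries', 0)
        if retries >= self.MAX_PROXY_RETRIES:
            return response  # give up and let Scrapy's own error handling take over
        request.meta['proxy_retries'] = retries + 1
        request.meta['proxy'] = self.kuaidaili.get_ip()
        request.dont_filter = True  # allow the re-scheduled request past the dupe filter
        return request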

Tag: #Douban app crawler