diff --git "a/\346\241\210\344\276\21331/CSDN/__init__.py" "b/\346\241\210\344\276\21331/CSDN/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\346\241\210\344\276\21331/CSDN/__pycache__/__init__.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/__pycache__/__init__.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..eb13ba7fdf7382c88f6e4626beff659b8ed15e25 Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/__pycache__/__init__.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/CSDN/__pycache__/items.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/__pycache__/items.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..b7af86cd888de856c4e0347f7b489b07adbca130 Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/__pycache__/items.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/CSDN/__pycache__/pipelines.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/__pycache__/pipelines.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..2ffbe191773d74c623a3fef9a259f74747c9e6ef Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/__pycache__/pipelines.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/CSDN/__pycache__/settings.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/__pycache__/settings.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..dafeee83060f10e76e19f68596fed08b4505028d Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/__pycache__/settings.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/CSDN/begin.py" "b/\346\241\210\344\276\21331/CSDN/begin.py" new file mode 100644 index 0000000000000000000000000000000000000000..d842d0e51340d8b30755d9c5c76e657fdb12e372 --- /dev/null +++ "b/\346\241\210\344\276\21331/CSDN/begin.py" @@ -0,0 +1,2 @@ +from scrapy import cmdline +cmdline.execute(("scrapy crawl C").split()) \ No newline at end of file diff --git "a/\346\241\210\344\276\21331/CSDN/items.py" "b/\346\241\210\344\276\21331/CSDN/items.py" new file mode 100644 index 0000000000000000000000000000000000000000..df3fbc2c3dc93a767a7e55c114e0c80813a29ffe --- /dev/null +++ "b/\346\241\210\344\276\21331/CSDN/items.py" @@ -0,0 +1,12 @@ +# Define here the models for your scraped items +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class CsdnItem(scrapy.Item): + # define the fields for your item here like: + username = scrapy.Field() + blogUrl = scrapy.Field() diff --git "a/\346\241\210\344\276\21331/CSDN/middlewares.py" "b/\346\241\210\344\276\21331/CSDN/middlewares.py" new file mode 100644 index 0000000000000000000000000000000000000000..f701a62687f9218d0ef6dd674d45aa8245cbfd49 --- /dev/null +++ "b/\346\241\210\344\276\21331/CSDN/middlewares.py" @@ -0,0 +1,103 @@ +# Define here the models for your spider middleware +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy import signals + +# useful for handling different item types with a single interface +from itemadapter import is_item, ItemAdapter + + +class CsdnSpiderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. 
diff --git "a/\346\241\210\344\276\21331/CSDN/middlewares.py" "b/\346\241\210\344\276\21331/CSDN/middlewares.py"
new file mode 100644
index 0000000000000000000000000000000000000000..f701a62687f9218d0ef6dd674d45aa8245cbfd49
--- /dev/null
+++ "b/\346\241\210\344\276\21331/CSDN/middlewares.py"
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class CsdnSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class CsdnDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
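Both middleware classes above are the unmodified scrapy startproject templates, and neither is enabled in settings.py. If request-level customization were needed later, a downloader middleware could rotate the User-Agent header; the sketch below is illustrative only (RandomUserAgentMiddleware and UA_POOL are invented names, and the pool entries are truncated placeholders):

    # sketch: a possible addition to CSDN/middlewares.py (not part of this diff)
    import random

    UA_POOL = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...',            # placeholder
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) ...',      # placeholder
    ]

    class RandomUserAgentMiddleware:
        def process_request(self, request, spider):
            # Pick a User-Agent for each outgoing request.
            request.headers['User-Agent'] = random.choice(UA_POOL)
            return None  # let the request continue through the chain

It would be switched on through the commented-out DOWNLOADER_MIDDLEWARES block in settings.py.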
diff --git "a/\346\241\210\344\276\21331/CSDN/pipelines.py" "b/\346\241\210\344\276\21331/CSDN/pipelines.py"
new file mode 100644
index 0000000000000000000000000000000000000000..797e40e1a425e318ed0cf34e7289b10635a17279
--- /dev/null
+++ "b/\346\241\210\344\276\21331/CSDN/pipelines.py"
@@ -0,0 +1,13 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class CsdnPipeline:
+    def process_item(self, item, spider):
+        return item
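CsdnPipeline is a pass-through: it receives each item and returns it unchanged, so scraped fans are only visible in the crawl log. A sketch of a pipeline that persists items as JSON lines (the JsonLinesPipeline name and fans.jsonl path are assumptions, not part of this diff):

    # sketch: a persisting pipeline (would also need its own ITEM_PIPELINES entry)
    import json
    from itemadapter import ItemAdapter

    class JsonLinesPipeline:
        def open_spider(self, spider):
            self.file = open('fans.jsonl', 'w', encoding='utf-8')

        def close_spider(self, spider):
            self.file.close()

        def process_item(self, item, spider):
            # ItemAdapter exposes a dict view over any supported item type.
            line = json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False)
            self.file.write(line + '\n')
            return item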
diff --git "a/\346\241\210\344\276\21331/CSDN/settings.py" "b/\346\241\210\344\276\21331/CSDN/settings.py"
new file mode 100644
index 0000000000000000000000000000000000000000..40d0aebab262fbc7689e3e84112d161c110e6c85
--- /dev/null
+++ "b/\346\241\210\344\276\21331/CSDN/settings.py"
@@ -0,0 +1,89 @@
+# Scrapy settings for CSDN project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'CSDN'
+
+SPIDER_MODULES = ['CSDN.spiders']
+NEWSPIDER_MODULE = 'CSDN.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+DEFAULT_REQUEST_HEADERS = {
+    'Accept': 'application/json, text/plain, */*',
+    'Accept-Language': 'en',
+    'referer': 'https://dream.blog.csdn.net/?type=sub&subType=fans'
+}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'CSDN.middlewares.CsdnSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'CSDN.middlewares.CsdnDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'CSDN.pipelines.CsdnPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git "a/\346\241\210\344\276\21331/CSDN/spiders/C.py" "b/\346\241\210\344\276\21331/CSDN/spiders/C.py"
new file mode 100644
index 0000000000000000000000000000000000000000..615a68de2a71f4aec741717ad26d09bca5b642e9
--- /dev/null
+++ "b/\346\241\210\344\276\21331/CSDN/spiders/C.py"
@@ -0,0 +1,29 @@
+import scrapy
+from scrapy import Request
+import json
+from CSDN.items import CsdnItem
+
+
+class CSpider(scrapy.Spider):
+    name = 'C'
+    allowed_domains = ['blog.csdn.net']
+    start_urls = ['https://blog.csdn.net/community/home-api/v1/get-fans-list?page=1&size=20&noMore=false&blogUsername=hihell']
+
+    def parse(self, response):
+        # Extract the fans on page 1, then schedule the remaining pages.
+        # The total page count is temporarily hard-coded to 10.
+        yield from self.parse_item(response)
+        for page in range(2, 11):
+            print("Crawling page {}".format(page), end="")
+            yield Request(
+                "https://blog.csdn.net/community/home-api/v1/get-fans-list?page={}&size=20&noMore=false&blogUsername=hihell".format(page),
+                callback=self.parse_item)
+
+    def parse_item(self, response):
+        data = json.loads(response.text)
+        for one_item in data["data"]["list"]:
+            # Build a fresh item per fan so yielded items do not share state.
+            item = CsdnItem()
+            item["username"] = one_item["username"]
+            item["blogUrl"] = one_item["blogUrl"]
+            yield item
diff --git "a/\346\241\210\344\276\21331/CSDN/spiders/__init__.py" "b/\346\241\210\344\276\21331/CSDN/spiders/__init__.py"
new file mode 100644
index 0000000000000000000000000000000000000000..ebd689ac51d69c5e1dbbe80083c2b20a39f8bb79
--- /dev/null
+++ "b/\346\241\210\344\276\21331/CSDN/spiders/__init__.py"
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
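parse_item assumes the fan-list endpoint returns JSON shaped like {"data": {"list": [{"username": ..., "blogUrl": ...}, ...]}}. The extraction loop can be exercised offline against a hand-built payload (the sample values below are invented):

    # sketch: offline check of the parse_item extraction logic
    import json

    sample = json.loads(
        '{"data": {"list": [{"username": "alice", "blogUrl": "https://blog.csdn.net/alice"}]}}'
    )
    for fan in sample["data"]["list"]:
        print(fan["username"], fan["blogUrl"])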
diff --git "a/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/C.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/C.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..4ba2a0eca2eb750d46d4078e466c0d40dfcb9c92 Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/C.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/__init__.cpython-38.pyc" "b/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/__init__.cpython-38.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..f05595e0f36c9600c1fb2ecaed8319b26ebf7160 Binary files /dev/null and "b/\346\241\210\344\276\21331/CSDN/spiders/__pycache__/__init__.cpython-38.pyc" differ diff --git "a/\346\241\210\344\276\21331/scrapy.cfg" "b/\346\241\210\344\276\21331/scrapy.cfg" new file mode 100644 index 0000000000000000000000000000000000000000..06892ea5a3183d583572e90de08aef2b740ab45f --- /dev/null +++ "b/\346\241\210\344\276\21331/scrapy.cfg" @@ -0,0 +1,11 @@ +# Automatically created by: scrapy startproject +# +# For more information about the [deploy] section see: +# https://scrapyd.readthedocs.io/en/latest/deploy.html + +[settings] +default = CSDN.settings + +[deploy] +#url = http://localhost:6800/ +project = CSDN