Merge pull request #79 from tikazyq/develop
Develop
Marvin Zhang authored Jul 7, 2019
2 parents 839dfca + 211fc13 commit 6ca31b7
Showing 16 changed files with 358 additions and 9 deletions.
2 changes: 1 addition & 1 deletion README-zh.md
@@ -89,7 +89,7 @@ Crawlab's architecture is very similar to Celery's, but adds components including the frontend, spiders, F

Tasks are implemented with `Popen` from Python's `subprocess` module. The task ID is exposed to the spider task's process as the environment variable `CRAWLAB_TASK_ID`, which is used to associate the scraped data with the task.
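
As an illustration of that mechanism, a minimal sketch of launching a task with `Popen` and passing the task ID through the environment (the command and ID below are made up, not Crawlab's actual internals):

```python
import os
import subprocess

# sketch: run a spider command with CRAWLAB_TASK_ID injected into
# its environment (command and task id are hypothetical)
env = os.environ.copy()
env['CRAWLAB_TASK_ID'] = 'task-0001'
p = subprocess.Popen(['python', 'spider.py'], env=env)
p.wait()
```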

In your spider program, you need to store the value of `CRAWLAB_TASK_ID` as `task_id` together with the scraped data in the database. This way Crawlab konws how to associate a spider task with its scraped data. Currently, Crawlab only supports MongoDB.
In your spider program, you need to store the value of `CRAWLAB_TASK_ID` as `task_id` together with the scraped data in the database. This way Crawlab knows how to associate a spider task with its scraped data. Currently, Crawlab only supports MongoDB.
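
A minimal sketch of that spider-side write, assuming pymongo and the `MONGO_*` environment-variable conventions used elsewhere in this repository (the collection name and record are hypothetical):

```python
import os
from pymongo import MongoClient

mongo = MongoClient(
    host=os.environ.get('MONGO_HOST') or 'localhost',
    port=int(os.environ.get('MONGO_PORT') or 27017),
)
col = mongo[os.environ.get('MONGO_DB') or 'crawlab_test']['results']  # hypothetical collection

def save_record(record):
    # 'task_id' is the key Crawlab matches against, per the paragraph above
    record['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
    col.insert_one(record)

save_record({'title': 'example', 'url': 'http://example.com/news/1'})
```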

### Scrapy

2 changes: 1 addition & 1 deletion crawlab/app.py
@@ -103,4 +103,4 @@ def update_nodes_status_online(event):

if __name__ == '__main__':
# run app instance
app.run(host=FLASK_HOST, port=FLASK_PORT, threaded=True)
app.run(host=FLASK_HOST, port=FLASK_PORT)
1 change: 1 addition & 0 deletions crawlab/requirements.txt
@@ -14,3 +14,4 @@ eventlet
Celery
Flower
redis
gunicorn
18 changes: 16 additions & 2 deletions crawlab/tasks/scheduler.py
@@ -1,3 +1,6 @@
import atexit
import fcntl

import requests
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
@@ -65,8 +68,19 @@ def update(self):
print(f'running: {self.scheduler.running}')

def run(self):
self.update()
self.scheduler.start()
        f = open("scheduler.lock", "wb")
        try:
            # non-blocking exclusive lock: only the first process to acquire it
            # starts the scheduler; the rest skip it
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self.update()
            self.scheduler.start()
        except BlockingIOError:
            # another process already holds the lock and runs the scheduler
            pass

        def unlock():
            fcntl.flock(f, fcntl.LOCK_UN)
            f.close()

        atexit.register(unlock)


scheduler = Scheduler()
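Context for the lock: this commit also moves app.py under gunicorn with 8 workers (see docker_init.sh below), so the scheduler module is imported by several processes, and the non-blocking `flock` lets only the first of them start APScheduler. A self-contained sketch of the same file-lock singleton pattern, with a hypothetical lock path:

```python
import atexit
import fcntl

def run_once_per_machine(start):
    """Run start() in at most one process, guarded by a file lock (sketch)."""
    f = open('/tmp/example.lock', 'wb')  # hypothetical lock path
    try:
        # LOCK_NB makes flock fail immediately if another process holds the lock
        fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        f.close()
        return False  # another process won the race
    atexit.register(lambda: (fcntl.flock(f, fcntl.LOCK_UN), f.close()))
    start()
    return True
```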
5 changes: 3 additions & 2 deletions docker_init.sh
@@ -1,12 +1,13 @@
#!/bin/sh
case $1 in
master)
cd /opt/crawlab/frontend \
cd $WORK_DIR/frontend \
&& npm run build:prod \
&& service nginx start
python $WORK_DIR/crawlab/flower.py >> /opt/crawlab/flower.log 2>&1 &
python $WORK_DIR/crawlab/worker.py >> /opt/crawlab/worker.log 2>&1 &
python $WORK_DIR/crawlab/app.py
cd $WORK_DIR/crawlab \
&& gunicorn --log-level=DEBUG -b 0.0.0.0 -w 8 app:app
;;
worker)
python $WORK_DIR/crawlab/app.py >> /opt/crawlab/app.log 2>&1 &
11 changes: 11 additions & 0 deletions spiders/sinastock/scrapy.cfg
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = sinastock.settings

[deploy]
#url = http://localhost:6800/
project = sinastock
Empty file.
21 changes: 21 additions & 0 deletions spiders/sinastock/sinastock/items.py
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class NewsItem(scrapy.Item):
# define the fields for your item here like:
_id = scrapy.Field()
title = scrapy.Field()
ts_str = scrapy.Field()
ts = scrapy.Field()
url = scrapy.Field()
text = scrapy.Field()
task_id = scrapy.Field()
source = scrapy.Field()
stocks = scrapy.Field()
103 changes: 103 additions & 0 deletions spiders/sinastock/sinastock/middlewares.py
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class SinastockSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.

@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s

def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.

# Should return None or raise an exception.
return None

def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.

# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i

def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.

# Should return either None or an iterable of Response, dict
# or Item objects.
pass

def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.

# Must return only requests (not items).
for r in start_requests:
yield r

def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)


class SinastockDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.

@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s

def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.

# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None

def process_response(self, request, response, spider):
# Called with the response returned from the downloader.

        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response

def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.

# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass

def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
28 changes: 28 additions & 0 deletions spiders/sinastock/sinastock/pipelines.py
@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os

from pymongo import MongoClient


class SinastockPipeline(object):
mongo = MongoClient(
host=os.environ.get('MONGO_HOST') or 'localhost',
port=int(os.environ.get('MONGO_PORT') or 27017)
)
db = mongo[os.environ.get('MONGO_DB') or 'crawlab_test']
col = db.get_collection(os.environ.get('CRAWLAB_COLLECTION') or 'stock_news')

# create indexes
col.create_index('stocks')
col.create_index('url')

    def process_item(self, item, spider):
        item['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
        # deduplicate by url; insert_one replaces the deprecated Collection.save
        if self.col.find_one({'url': item['url']}) is None:
            self.col.insert_one(dict(item))
        return item
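The find_one-then-insert check above is not atomic when several spiders write concurrently; as a sketch, the same url-based deduplication could be done in one upsert against the indexed `url` key:

```python
def process_item(self, item, spider):
    item['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
    # atomic alternative: write the item only if no document
    # with this url exists yet (upsert on the indexed 'url' key)
    self.col.update_one(
        {'url': item['url']},
        {'$setOnInsert': dict(item)},
        upsert=True,
    )
    return item
```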
89 changes: 89 additions & 0 deletions spiders/sinastock/sinastock/settings.py
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-

# Scrapy settings for sinastock project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'sinastock'

SPIDER_MODULES = ['sinastock.spiders']
NEWSPIDER_MODULE = 'sinastock.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'sinastock (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'sinastock.middlewares.SinastockSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'sinastock.middlewares.SinastockDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'sinastock.pipelines.SinastockPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
4 changes: 4 additions & 0 deletions spiders/sinastock/sinastock/spiders/__init__.py
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
67 changes: 67 additions & 0 deletions spiders/sinastock/sinastock/spiders/sinastock_spider.py
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime

import scrapy
from pymongo import MongoClient
import pytz

from sinastock.items import NewsItem

# timezone for localizing article timestamps
tz = pytz.timezone('Asia/Shanghai')


class SinastockSpiderSpider(scrapy.Spider):
name = 'sinastock_spider'
allowed_domains = ['finance.sina.com.cn']
mongo = MongoClient(
host=os.environ.get('MONGO_HOST') or 'localhost',
port=int(os.environ.get('MONGO_PORT') or 27017)
)
db = mongo[os.environ.get('MONGO_DB') or 'crawlab_test']
col = db.get_collection(os.environ.get('CRAWLAB_COLLECTION') or 'stock_news')
    page_num = int(os.environ.get('PAGE_NUM') or 3)  # int(None) would raise if PAGE_NUM is unset

def start_requests(self):
col = self.db['stocks']
for s in col.find({}):
code, ex = s['ts_code'].split('.')
for i in range(self.page_num):
url = f'http://vip.stock.finance.sina.com.cn/corp/view/vCB_AllNewsStock.php?symbol={ex.lower()}{code}&Page={i + 1}'
yield scrapy.Request(
url=url,
callback=self.parse,
meta={'ts_code': s['ts_code']}
)

def parse(self, response):
for a in response.css('.datelist > ul > a'):
url = a.css('a::attr("href")').extract_first()
item = NewsItem(
title=a.css('a::text').extract_first(),
url=url,
source='sina',
stocks=[response.meta['ts_code']]
)
yield scrapy.Request(
url=url,
callback=self.parse_detail,
meta={'item': item}
)

    def parse_detail(self, response):
        item = response.meta['item']
        text = response.css('#artibody').extract_first()
        item['ts_str'] = response.css('.date::text').extract_first()
        if text is None or item['ts_str'] is None:
            # article body or date missing; skip this page
            return
        # keep only the text between HTML tags
        pre = re.compile('>(.*?)<')
        item['text'] = ''.join(pre.findall(text)).replace('\u3000', '')
        ts = datetime.strptime(item['ts_str'], '%Y年%m月%d日 %H:%M')
        item['ts'] = tz.localize(ts)
        yield item
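The `>(.*?)<` regex above keeps only text between tags and can trip on attribute values that contain `>`; a sketch of the same cleanup using w3lib, which Scrapy already depends on (the helper function is hypothetical, `remove_tags` and `replace_entities` are real w3lib APIs):

```python
from w3lib.html import remove_tags, replace_entities

def extract_text(html):
    # strip all tags, decode entities such as &amp;, drop ideographic spaces
    return replace_entities(remove_tags(html)).replace('\u3000', '')
```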
