nyaasi.py (forked from MadeOfMagicAndWires/qBit-plugins)
# -*- coding: utf-8 -*-
#VERSION: 1.1
#AUTHORS: Joost Bremmer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

try:
    # Python 2
    from HTMLParser import HTMLParser
except ImportError:
    # Python 3
    from html.parser import HTMLParser

# import qBittorrent's search-engine helper modules
try:
    from novaprinter import prettyPrinter
    from helpers import retrieve_url
except ImportError:
    pass


class nyaasi(object):
    """Class used by qBittorrent to search for torrents"""

    url = 'https://nyaa.si'
    name = 'Nyaa.si'

    # defines which search categories are supported by this search engine
    # and their corresponding id. Possible categories are:
    # 'all', 'movies', 'tv', 'music', 'games', 'anime', 'software',
    # 'pictures', 'books'
    supported_categories = {
        'all': '0_0',
        'anime': '1_0',
        'books': '3_0',
        'music': '2_0',
        'pictures': '5_0',
        'software': '6_0',
        'tv': '4_0',
        'movies': '4_0'}
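
    # For illustration (combining the ids above with the URL template used in
    # search() below): a query for "Ubuntu+Linux" in the 'software' category
    # resolves to
    #   https://nyaa.si/?f=0&s=seeders&o=desc&c=6_0&q=Ubuntu+Linux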

    class NyaasiParser(HTMLParser):
        """Parses a Nyaa.si browse page and collects the search results"""

        def __init__(self, res, url):
            try:
                super().__init__()
            except TypeError:
                # Python 2: zero-argument super() is not available and
                # HTMLParser is an old-style class there.
                # See: http://stackoverflow.com/questions/9698614/
                HTMLParser.__init__(self)
            self.engine_url = url
            self.results = res
            self.curr = None       # the result currently being assembled
            self.td_counter = -1   # -1 means "not inside a result row"

        def handle_starttag(self, tag, attr):
            """Tell the parser what to do with which tags"""
            if tag == 'a':
                self.start_a(attr)

        def handle_endtag(self, tag):
            if tag == 'td':
                self.start_td()

        def start_a(self, attr):
            params = dict(attr)
            # get torrent name from the title of its /view/ link
            if ('title' in params and 'class' not in params
                    and params['href'].startswith('/view/')):
                hit = {
                    'name': params['title'],
                    'desc_link': self.engine_url + params['href']}
                if not self.curr:
                    hit['engine_url'] = self.engine_url
                    self.curr = hit
            # get torrent magnet link; the remaining table cells of this row
            # hold the size, seeds and leechers
            elif 'href' in params and params['href'].startswith("magnet:?"):
                if self.curr:
                    self.curr['link'] = params['href']
                    self.td_counter += 1

        def start_td(self):
            # Keep track of the table cells seen since the magnet link
            if self.td_counter >= 0:
                self.td_counter += 1
            # Add the hit to the results,
            # then reset the counters for the next result
            if self.td_counter >= 5:
                self.results.append(self.curr)
                self.curr = None
                self.td_counter = -1

        def handle_data(self, data):
            # These fields matter
            if self.td_counter > 0 and self.td_counter <= 5:
                # Catch the size
                if self.td_counter == 1:
                    self.curr['size'] = data.strip()
                # Catch the seeds
                elif self.td_counter == 3:
                    try:
                        self.curr['seeds'] = int(data.strip())
                    except ValueError:
                        self.curr['seeds'] = -1
                # Catch the leechers
                elif self.td_counter == 4:
                    try:
                        self.curr['leech'] = int(data.strip())
                    except ValueError:
                        self.curr['leech'] = -1
                # The rest is not supported by prettyPrinter
                else:
                    pass
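
    # For reference, a fully parsed hit (as appended to the result list and
    # later handed to prettyPrinter) carries these keys; the values shown
    # here are purely illustrative placeholders:
    #   {'engine_url': 'https://nyaa.si',
    #    'name': '<torrent title>',
    #    'desc_link': 'https://nyaa.si/view/...',
    #    'link': 'magnet:?...',
    #    'size': '1.4 GiB',
    #    'seeds': 12,
    #    'leech': 3}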

    # DO NOT CHANGE the name and parameters of this function
    # This function will be the one called by nova2.py
    def search(self, what, cat='all'):
        """
        Retrieve and parse engine search results by category and query.

        Parameters:
        :param what: a string with the search tokens, already escaped
                     (e.g. "Ubuntu+Linux")
        :param cat: the name of a search category, see supported_categories.
        """
        url = str("{0}/?f=0&s=seeders&o=desc&c={1}&q={2}"
                  .format(self.url,
                          self.supported_categories.get(cat),
                          what))

        hits = []
        page = 1
        parser = self.NyaasiParser(hits, self.url)
        # Keep requesting result pages until one comes back with fewer hits
        # than a full page (the code assumes 75 results per full page).
        while True:
            res = retrieve_url(url + "&p={}".format(page))
            parser.feed(res)
            for each in hits:
                prettyPrinter(each)
            if len(hits) < 75:
                break
            del hits[:]
            page += 1
        parser.close()
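

# Minimal manual smoke test; not part of the plugin API called by nova2.py.
# It assumes this file sits in qBittorrent's search engines folder so that
# novaprinter and helpers (imported above) are actually importable; the query
# string is only an illustrative example.
if __name__ == "__main__":
    engine = nyaasi()
    engine.search("Ubuntu+Linux", "software")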