#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import mylar
from mylar import db, mb, importer, search, PostProcessor, versioncheck, logger, readinglist, helpers
import simplejson
import cherrypy
from xml.sax.saxutils import escape
import os
import glob
import urllib2
from urllib import urlencode, quote_plus
import cache
import imghdr
from operator import itemgetter
from cherrypy.lib.static import serve_file, serve_download
import datetime
from mylar.webserve import serve_template
import re
cmd_list = ['root', 'Publishers', 'AllTitles', 'StoryArcs', 'ReadList', 'OneOffs', 'Comic', 'Publisher', 'Issue', 'StoryArc', 'Recent', 'deliverFile']
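# Illustrative OPDS request paths handled by this class (the IDs and the
# publisher name below are placeholder values, not real database entries):
#   /opds?cmd=root
#   /opds?cmd=Publisher&pubid=DC%20Comics
#   /opds?cmd=Comic&comicid=12345&index=25
#   /opds?cmd=Issue&issueid=67890&file=issue.cbz
#   /opds?cmd=StoryArc&arcid=98765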
class OPDS(object):
def __init__(self):
self.cmd = None
self.PAGE_SIZE=mylar.CONFIG.OPDS_PAGESIZE
self.img = None
self.issue_id = None
self.file = None
self.filename = None
self.kwargs = None
self.data = None
if mylar.CONFIG.HTTP_ROOT is None:
self.opdsroot = '/opds'
elif mylar.CONFIG.HTTP_ROOT.endswith('/'):
self.opdsroot = mylar.CONFIG.HTTP_ROOT + 'opds'
else:
if mylar.CONFIG.HTTP_ROOT != '/':
self.opdsroot = mylar.CONFIG.HTTP_ROOT + '/opds'
else:
self.opdsroot = mylar.CONFIG.HTTP_ROOT + 'opds'
def checkParams(self, *args, **kwargs):
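# Validate the request: default the command to 'root', refuse service when OPDS is disabled, reject commands not in cmd_list, and keep the remaining kwargs for fetchData().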
if 'cmd' not in kwargs:
self.cmd = 'root'
if not mylar.CONFIG.OPDS_ENABLE:
self.data = self._error_with_message('OPDS not enabled')
return
if not self.cmd:
if kwargs['cmd'] not in cmd_list:
self.data = self._error_with_message('Unknown command: %s' % kwargs['cmd'])
return
else:
self.cmd = kwargs.pop('cmd')
self.kwargs = kwargs
self.data = 'OK'
def fetchData(self):
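# Dispatch the validated command to its _<cmd> handler, then serve the result: a cover image, a file download (marking the issue as read when a reading list entry exists), or the rendered opds.html feed.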
if self.data == 'OK':
logger.fdebug('Received OPDS command: ' + self.cmd)
methodToCall = getattr(self, "_" + self.cmd)
result = methodToCall(**self.kwargs)
if self.img:
return serve_file(path=self.img, content_type='image/jpeg')
if self.file and self.filename:
if self.issue_id:
try:
readinglist.Readinglist(IssueID=self.issue_id).markasRead()
except:
logger.fdebug('No reading list found to update.')
return serve_download(path=self.file, name=self.filename)
if isinstance(self.data, basestring):
return self.data
else:
cherrypy.response.headers['Content-Type'] = "text/xml"
return serve_template(templatename="opds.html", title=self.data['title'], opds=self.data)
else:
return self.data
def _error_with_message(self, message):
error = '<feed><error>%s</error></feed>' % message
cherrypy.response.headers['Content-Type'] = "text/xml"
return error
def _dic_from_query(self, query):
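# Run a raw SQL query and return the result rows as a list of plain dicts.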
myDB = db.DBConnection()
rows = myDB.select(query)
rows_as_dic = []
for row in rows:
row_as_dic = dict(zip(row.keys(), row))
rows_as_dic.append(row_as_dic)
return rows_as_dic
def _root(self, **kwargs):
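# Build the top-level navigation feed linking to the Recent Additions, Publishers, All Titles, Story Arcs, Read List and One-Offs sections.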
myDB = db.DBConnection()
feed = {}
feed['title'] = 'Mylar OPDS'
currenturi = cherrypy.url()
feed['id'] = re.sub('/', ':', currenturi)
feed['updated'] = mylar.helpers.now()
links = []
entries=[]
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
links.append(getLink(href='%s?cmd=search' % self.opdsroot, type='application/opensearchdescription+xml',rel='search',title='Search'))
publishers = myDB.select("SELECT ComicPublisher from comics GROUP BY ComicPublisher")
entries.append(
{
'title': 'Recent Additions',
'id': 'Recent',
'updated': mylar.helpers.now(),
'content': 'Recently Added Issues',
'href': '%s?cmd=Recent' % self.opdsroot,
'kind': 'acquisition',
'rel': 'subsection',
}
)
if len(publishers) > 0:
count = len(publishers)
entries.append(
{
'title': 'Publishers (%s)' % count,
'id': 'Publishers',
'updated': mylar.helpers.now(),
'content': 'List of Comic Publishers',
'href': '%s?cmd=Publishers' %self.opdsroot,
'kind': 'navigation',
'rel': 'subsection',
}
)
comics = mylar.helpers.havetotals()
count = 0
for comic in comics:
if comic['haveissues'] > 0:
count += 1
if count > -1:
entries.append(
{
'title': 'All Titles (%s)' % count,
'id': 'AllTitles',
'updated': mylar.helpers.now(),
'content': 'List of All Comics',
'href': '%s?cmd=AllTitles' % self.opdsroot,
'kind': 'navigation',
'rel': 'subsection',
}
)
storyArcs = mylar.helpers.listStoryArcs()
logger.debug(storyArcs)
if len(storyArcs) > 0:
entries.append(
{
'title': 'Story Arcs (%s)' % len(storyArcs),
'id': 'StoryArcs',
'updated': mylar.helpers.now(),
'content': 'List of Story Arcs',
'href': '%s?cmd=StoryArcs' % self.opdsroot,
'kind': 'navigation',
'rel': 'subsection',
}
)
readList = myDB.select("SELECT * from readlist")
if len(readList) > 0:
entries.append(
{
'title': 'Read List (%s)' % len(readList),
'id': 'ReadList',
'updated': mylar.helpers.now(),
'content': 'Current Read List',
'href': '%s?cmd=ReadList' % self.opdsroot,
'kind': 'navigation',
'rel': 'subsection',
}
)
gbd = mylar.CONFIG.GRABBAG_DIR + '/*'
oneofflist = glob.glob(gbd)
if len(oneofflist) > 0:
entries.append(
{
'title': 'One-Offs (%s)' % len(oneofflist),
'id': 'OneOffs',
'updated': mylar.helpers.now(),
'content': 'OneOffs',
'href': '%s?cmd=OneOffs' % self.opdsroot,
'kind': 'navigation',
'rel': 'subsection',
}
)
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _Publishers(self, **kwargs):
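# Navigation feed of publishers that have at least one series with issues on disk, paginated via the 'index' parameter.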
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
feed = {}
feed['title'] = 'Mylar OPDS - Publishers'
feed['id'] = 'Publishers'
feed['updated'] = mylar.helpers.now()
links = []
entries=[]
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=Publishers' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
publishers = myDB.select("SELECT ComicPublisher from comics GROUP BY ComicPublisher")
comics = mylar.helpers.havetotals()
for publisher in publishers:
lastupdated = '0000-00-00'
totaltitles = 0
for comic in comics:
if comic['ComicPublisher'] == publisher['ComicPublisher'] and comic['haveissues'] > 0:
totaltitles += 1
if comic['DateAdded'] > lastupdated:
lastupdated = comic['DateAdded']
if totaltitles > 0:
entries.append(
{
'title': escape('%s (%s)' % (publisher['ComicPublisher'], totaltitles)),
'id': escape('publisher:%s' % publisher['ComicPublisher']),
'updated': lastupdated,
'content': escape('%s (%s)' % (publisher['ComicPublisher'], totaltitles)),
'href': '%s?cmd=Publisher&amp;pubid=%s' % (self.opdsroot, quote_plus(publisher['ComicPublisher'])),
'kind': 'navigation',
'rel': 'subsection',
}
)
if len(entries) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=Publishers&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=Publishers&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries[index:(index+self.PAGE_SIZE)]
self.data = feed
return
def _AllTitles(self, **kwargs):
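# Navigation feed of every watched series with at least one issue on disk.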
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
feed = {}
feed['title'] = 'Mylar OPDS - All Titles'
feed['id'] = 'AllTitles'
feed['updated'] = mylar.helpers.now()
links = []
entries=[]
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=AllTitles' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
comics = mylar.helpers.havetotals()
for comic in comics:
if comic['haveissues'] > 0:
entries.append(
{
'title': escape('%s (%s) (comicID: %s)' % (comic['ComicName'], comic['ComicYear'], comic['ComicID'])),
'id': escape('comic:%s (%s) [%s]' % (comic['ComicName'], comic['ComicYear'], comic['ComicID'])),
'updated': comic['DateAdded'],
'content': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
'href': '%s?cmd=Comic&amp;comicid=%s' % (self.opdsroot, quote_plus(comic['ComicID'])),
'kind': 'acquisition',
'rel': 'subsection',
}
)
if len(entries) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=AllTitles&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=AllTitles&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries[index:(index+self.PAGE_SIZE)]
self.data = feed
return
def _Publisher(self, **kwargs):
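# Navigation feed of the series belonging to a single publisher (pubid).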
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
if 'pubid' not in kwargs:
self.data =self._error_with_message('No Publisher Provided')
return
links = []
entries=[]
allcomics = mylar.helpers.havetotals()
for comic in allcomics:
if comic['ComicPublisher'] == kwargs['pubid'] and comic['haveissues'] > 0:
entries.append(
{
'title': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
'id': escape('comic:%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
'updated': comic['DateAdded'],
'content': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
'href': '%s?cmd=Comic&amp;comicid=%s' % (self.opdsroot, quote_plus(comic['ComicID'])),
'kind': 'acquisition',
'rel': 'subsection',
}
)
feed = {}
pubname = '%s (%s)' % (escape(kwargs['pubid']),len(entries))
feed['title'] = 'Mylar OPDS - %s' % (pubname)
feed['id'] = 'publisher:%s' % escape(kwargs['pubid'])
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=Publisher&amp;pubid=%s' % (self.opdsroot, quote_plus(kwargs['pubid'])),type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(entries) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=Publisher&amp;pubid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['pubid']),index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=Publisher&amp;pubid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['pubid']),index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries[index:(index+self.PAGE_SIZE)]
self.data = feed
return
def _Comic(self, **kwargs):
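# Acquisition feed of the issues (plus annuals, when enabled) present on disk for a single series (comicid).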
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
if 'comicid' not in kwargs:
self.data =self._error_with_message('No ComicID Provided')
return
links = []
entries=[]
comic = myDB.selectone('SELECT * from comics where ComicID=?', (kwargs['comicid'],)).fetchone()
if not comic:
self.data = self._error_with_message('Comic Not Found')
return
issues = self._dic_from_query('SELECT * from issues WHERE ComicID="' + kwargs['comicid'] + '" order by Int_IssueNumber DESC')
if mylar.CONFIG.ANNUALS_ON:
annuals = self._dic_from_query('SELECT * FROM annuals WHERE ComicID="' + kwargs['comicid'] + '"')
else:
annuals = []
for annual in annuals:
issues.append(annual)
issues = [x for x in issues if x['Location']]
if index <= len(issues):
subset = issues[index:(index+self.PAGE_SIZE)]
for issue in subset:
if 'DateAdded' in issue and issue['DateAdded']:
updated = issue['DateAdded']
else:
updated = issue['ReleaseDate']
image = None
thumbnail = None
if not 'ReleaseComicID' in issue:
title = escape('%s (%s) #%s - %s' % (issue['ComicName'], comic['ComicYear'], issue['Issue_Number'], issue['IssueName']))
image = issue['ImageURL_ALT']
thumbnail = issue['ImageURL']
else:
title = escape('Annual %s - %s' % (issue['Issue_Number'], issue['IssueName']))
fileloc = os.path.join(comic['ComicLocation'],issue['Location'])
if not os.path.isfile(fileloc):
logger.debug("Missing File: %s" % (fileloc))
continue
metainfo = None
if mylar.CONFIG.OPDS_METAINFO:
metainfo = mylar.helpers.IssueDetails(fileloc)
if not metainfo:
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': escape(title),
'id': escape('comic:%s (%s) [%s] - %s' % (issue['ComicName'], comic['ComicYear'], comic['ComicID'], issue['Issue_Number'])),
'updated': updated,
'content': escape('%s' % (metainfo[0]['summary'])),
'href': '%s?cmd=Issue&amp;issueid=%s&amp;file=%s' % (self.opdsroot, quote_plus(issue['IssueID']),quote_plus(issue['Location'].encode('utf-8'))),
'kind': 'acquisition',
'rel': 'file',
'author': metainfo[0]['writer'],
'image': image,
'thumbnail': thumbnail,
}
)
feed = {}
comicname = '%s' % (escape(comic['ComicName']))
feed['title'] = 'Mylar OPDS - %s' % (comicname)
feed['id'] = escape('comic:%s (%s)' % (comic['ComicName'], comic['ComicYear']))
feed['updated'] = comic['DateAdded']
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=Comic&amp;comicid=%s' % (self.opdsroot, quote_plus(kwargs['comicid'])),type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(issues) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=Comic&amp;comicid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['comicid']),index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=Comic&amp;comicid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['comicid']),index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _Recent(self, **kwargs):
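# Acquisition feed of recently Downloaded/Post-Processed issues, newest first, capped at 120 entries.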
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
links = []
entries=[]
recents = self._dic_from_query('SELECT * from snatched WHERE Status = "Post-Processed" OR Status = "Downloaded" order by DateAdded DESC LIMIT 120')
if index <= len(recents):
number = 1
subset = recents[index:(index+self.PAGE_SIZE)]
for issue in subset:
issuebook = myDB.fetch('SELECT * from issues WHERE IssueID = ?', (issue['IssueID'],)).fetchone()
if not issuebook:
issuebook = myDB.fetch('SELECT * from annuals WHERE IssueID = ?', (issue['IssueID'],)).fetchone()
comic = myDB.fetch('SELECT * from comics WHERE ComicID = ?', (issue['ComicID'],)).fetchone()
updated = issue['DateAdded']
image = None
thumbnail = None
if issuebook:
if not 'ReleaseComicID' in issuebook.keys():
if issuebook['DateAdded'] is None:
title = escape('%03d: %s #%s - %s (In stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['ReleaseDate']))
image = issuebook['ImageURL_ALT']
thumbnail = issuebook['ImageURL']
else:
title = escape('%03d: %s #%s - %s (Added to Mylar %s, in stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['DateAdded'], issuebook['ReleaseDate']))
image = issuebook['ImageURL_ALT']
thumbnail = issuebook['ImageURL']
else:
title = escape('%03d: %s Annual %s - %s (In stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['ReleaseDate']))
# logger.info("%s - %s" % (comic['ComicLocation'], issuebook['Location']))
number +=1
if not issuebook['Location']:
continue
location = issuebook['Location'].encode('utf-8')
fileloc = os.path.join(comic['ComicLocation'],issuebook['Location'])
metainfo = None
if mylar.CONFIG.OPDS_METAINFO:
metainfo = mylar.helpers.IssueDetails(fileloc)
if not metainfo:
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': title,
'id': escape('comic:%s (%s) - %s' % (issuebook['ComicName'], comic['ComicYear'], issuebook['Issue_Number'])),
'updated': updated,
'content': escape('%s' % (metainfo[0]['summary'])),
'href': '%s?cmd=Issue&amp;issueid=%s&amp;file=%s' % (self.opdsroot, quote_plus(issuebook['IssueID']),quote_plus(location)),
'kind': 'acquisition',
'rel': 'file',
'author': metainfo[0]['writer'],
'image': image,
'thumbnail': thumbnail,
}
)
feed = {}
feed['title'] = 'Mylar OPDS - New Arrivals'
feed['id'] = escape('New Arrivals')
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=Recent' % (self.opdsroot),type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(recents) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=Recent&amp;index=%s' % (self.opdsroot,index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=Recent&amp;index=%s' % (self.opdsroot,index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _deliverFile(self, **kwargs):
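# Serve an arbitrary file given its path and display name; used by the One-Offs feed.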
logger.fdebug("_deliverFile: kwargs: %s" % kwargs)
if 'file' not in kwargs:
self.data = self._error_with_message('No file provided')
elif 'filename' not in kwargs:
self.data = self._error_with_message('No filename provided')
else:
#logger.fdebug("file name: %s" % str(kwargs['file']))
self.filename = os.path.split(str(kwargs['file']))[1]
self.file = str(kwargs['file'])
return
def _Issue(self, **kwargs):
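# Resolve an IssueID against the storyarcs, issues and annuals tables and point self.file/self.filename at the matching file so fetchData() can serve it.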
if 'issueid' not in kwargs:
self.data = self._error_with_message('No IssueID Provided')
return
myDB = db.DBConnection()
issuetype = 0
issue = myDB.selectone("SELECT * from storyarcs WHERE IssueID=? and Location IS NOT NULL",
(kwargs['issueid'],)).fetchone()
if not issue:
issue = myDB.selectone("SELECT * from issues WHERE IssueID=?", (kwargs['issueid'],)).fetchone()
if not issue:
issue = myDB.selectone("SELECT * from annuals WHERE IssueID=?", (kwargs['issueid'],)).fetchone()
if not issue:
self.data = self._error_with_message('Issue Not Found')
return
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (issue['ComicID'],)).fetchone()
if not comic:
self.data = self._error_with_message('Comic Not Found in Watchlist')
return
self.issue_id = issue['IssueID']
self.file = os.path.join(comic['ComicLocation'],issue['Location'])
self.filename = issue['Location']
else:
self.issue_id = issue['IssueID']
self.file = issue['Location']
self.filename = os.path.split(issue['Location'])[1]
return
def _StoryArcs(self, **kwargs):
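# Navigation feed of story arcs that contain at least one downloaded issue.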
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
links = []
entries=[]
arcs = []
storyArcs = mylar.helpers.listStoryArcs()
for arc in storyArcs:
issuecount = 0
arcname = ''
updated = '0000-00-00'
arclist = myDB.select("SELECT * from storyarcs WHERE StoryArcID=?", (arc,))
for issue in arclist:
if issue['Status'] == 'Downloaded':
issuecount += 1
arcname = issue['StoryArc']
if issue['IssueDate'] > updated:
updated = issue['IssueDate']
if issuecount > 0:
arcs.append({'StoryArcName': arcname, 'StoryArcID': arc, 'IssueCount': issuecount, 'updated': updated})
newlist = sorted(arcs, key=itemgetter('StoryArcName'))
subset = newlist[index:(index + self.PAGE_SIZE)]
for arc in subset:
entries.append(
{
'title': '%s (%s)' % (arc['StoryArcName'],arc['IssueCount']),
'id': escape('storyarc:%s' % (arc['StoryArcID'])),
'updated': arc['updated'],
'content': '%s (%s)' % (arc['StoryArcName'],arc['IssueCount']),
'href': '%s?cmd=StoryArc&amp;arcid=%s' % (self.opdsroot, quote_plus(arc['StoryArcID'])),
'kind': 'acquisition',
'rel': 'subsection',
}
)
feed = {}
feed['title'] = 'Mylar OPDS - Story Arcs'
feed['id'] = 'StoryArcs'
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=StoryArcs' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(arcs) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=StoryArcs&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=StoryArcs&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _OneOffs(self, **kwargs):
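# Acquisition feed built from the files found in the grab-bag (one-offs) directory.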
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
links = []
entries = []
flist = []
book = ''
gbd = str(mylar.CONFIG.GRABBAG_DIR + '/*').encode('utf-8')
flist = glob.glob(gbd)
readlist = []
for book in flist:
issue = {}
fileexists = True
book = book.encode('utf-8')
issue['Title'] = book
issue['IssueID'] = book
issue['fileloc'] = book
issue['filename'] = book
issue['image'] = None
issue['thumbnail'] = None
issue['updated'] = helpers.now()
if not os.path.isfile(issue['fileloc']):
fileexists = False
if fileexists:
readlist.append(issue)
if len(readlist) > 0:
if index <= len(readlist):
subset = readlist[index:(index + self.PAGE_SIZE)]
for issue in subset:
metainfo = None
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': escape(issue['Title']),
'id': escape('comic:%s' % issue['IssueID']),
'updated': issue['updated'],
'content': escape('%s' % (metainfo[0]['summary'])),
'href': '%s?cmd=deliverFile&amp;file=%s&amp;filename=%s' % (self.opdsroot, quote_plus(issue['fileloc']), quote_plus(issue['filename'])),
'kind': 'acquisition',
'rel': 'file',
'author': metainfo[0]['writer'],
'image': issue['image'],
'thumbnail': issue['thumbnail'],
}
)
feed = {}
feed['title'] = 'Mylar OPDS - One-Offs'
feed['id'] = escape('OneOffs')
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=OneOffs' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(readlist) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _ReadList(self, **kwargs):
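# Acquisition feed of unread entries on the reading list.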
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
links = []
entries = []
rlist = self._dic_from_query("SELECT * from readlist where status!='Read'")
readlist = []
for book in rlist:
fileexists = False
issue = {}
issue['Title'] = '%s #%s' % (book['ComicName'], book['Issue_Number'])
issue['IssueID'] = book['IssueID']
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (book['ComicID'],)).fetchone()
bookentry = myDB.selectone("SELECT * from issues WHERE IssueID=?", (book['IssueID'],)).fetchone()
if bookentry:
if bookentry['Location']:
fileexists = True
issue['fileloc'] = os.path.join(comic['ComicLocation'], bookentry['Location'])
issue['filename'] = bookentry['Location'].encode('utf-8')
issue['image'] = bookentry['ImageURL_ALT']
issue['thumbnail'] = bookentry['ImageURL']
if bookentry['DateAdded']:
issue['updated'] = bookentry['DateAdded']
else:
issue['updated'] = bookentry['IssueDate']
else:
annualentry = myDB.selectone("SELECT * from annuals WHERE IssueID=?", (book['IssueID'],)).fetchone()
if annualentry:
if annualentry['Location']:
fileexists = True
issue['fileloc'] = os.path.join(comic['ComicLocation'], annualentry['Location'])
issue['filename'] = annualentry['Location'].encode('utf-8')
issue['image'] = None
issue['thumbnail'] = None
issue['updated'] = annualentry['IssueDate']
if not os.path.isfile(issue['fileloc']):
fileexists = False
if fileexists:
readlist.append(issue)
if len(readlist) > 0:
if index <= len(readlist):
subset = readlist[index:(index + self.PAGE_SIZE)]
for issue in subset:
metainfo = None
if mylar.CONFIG.OPDS_METAINFO:
metainfo = mylar.helpers.IssueDetails(issue['fileloc'])
if not metainfo:
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': escape(issue['Title']),
'id': escape('comic:%s' % issue['IssueID']),
'updated': issue['updated'],
'content': escape('%s' % (metainfo[0]['summary'])),
'href': '%s?cmd=Issue&amp;issueid=%s&amp;file=%s' % (self.opdsroot, quote_plus(issue['IssueID']),quote_plus(issue['filename'])),
'kind': 'acquisition',
'rel': 'file',
'author': metainfo[0]['writer'],
'image': issue['image'],
'thumbnail': issue['thumbnail'],
}
)
feed = {}
feed['title'] = 'Mylar OPDS - ReadList'
feed['id'] = escape('ReadList')
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=ReadList' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(readlist) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=ReadList&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=ReadList&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def _StoryArc(self, **kwargs):
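# Acquisition feed of the downloaded issues belonging to one story arc (arcid), in reading order.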
index = 0
if 'index' in kwargs:
index = int(kwargs['index'])
myDB = db.DBConnection()
if 'arcid' not in kwargs:
self.data =self._error_with_message('No ArcID Provided')
return
links = []
entries=[]
arclist = self._dic_from_query("SELECT * from storyarcs WHERE StoryArcID='" + kwargs['arcid'] + "' ORDER BY ReadingOrder")
newarclist = []
arcname = ''
for book in arclist:
arcname = book['StoryArc']
fileexists = False
issue = {}
issue['ReadingOrder'] = book['ReadingOrder']
issue['Title'] = '%s #%s' % (book['ComicName'],book['IssueNumber'])
issue['IssueID'] = book['IssueID']
issue['fileloc'] = ''
if book['Location']:
issue['fileloc'] = book['Location']
fileexists = True
issue['filename'] = os.path.split(book['Location'])[1].encode('utf-8')
issue['image'] = None
issue['thumbnail'] = None
issue['updated'] = book['IssueDate']
else:
bookentry = myDB.selectone("SELECT * from issues WHERE IssueID=?", (book['IssueID'],)).fetchone()
if bookentry:
if bookentry['Location']:
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", ( bookentry['ComicID'],)).fetchone()
fileexists = True
issue['fileloc'] = os.path.join(comic['ComicLocation'], bookentry['Location'])
issue['filename'] = bookentry['Location'].encode('utf-8')
issue['image'] = bookentry['ImageURL_ALT']
issue['thumbnail'] = bookentry['ImageURL']
if bookentry['DateAdded']:
issue['updated'] = bookentry['DateAdded']
else:
issue['updated'] = bookentry['IssueDate']
else:
annualentry = myDB.selectone("SELECT * from annuals WHERE IssueID=?", (book['IssueID'],)).fetchone()
if annualentry:
if annualentry['Location']:
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", ( annualentry['ComicID'],)).fetchone()
fileexists = True
issue['fileloc'] = os.path.join(comic['ComicLocation'], annualentry['Location'])
issue['filename'] = annualentry['Location'].encode('utf-8')
issue['image'] = None
issue['thumbnail'] = None
issue['updated'] = annualentry['IssueDate']
else:
if book['Location']:
fileexists = True
issue['fileloc'] = book['Location']
issue['filename'] = os.path.split(book['Location'])[1].encode('utf-8')
issue['image'] = None
issue['thumbnail'] = None
issue['updated'] = book['IssueDate']
if not os.path.isfile(issue['fileloc']):
fileexists = False
if fileexists:
newarclist.append(issue)
if len(newarclist) > 0:
if index <= len(newarclist):
subset = newarclist[index:(index + self.PAGE_SIZE)]
for issue in subset:
metainfo = None
if mylar.CONFIG.OPDS_METAINFO:
metainfo = mylar.helpers.IssueDetails(issue['fileloc'])
if not metainfo:
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': escape('%s - %s' % (issue['ReadingOrder'], issue['Title'])),
'id': escape('comic:%s' % issue['IssueID']),
'updated': issue['updated'],
'content': escape('%s' % (metainfo[0]['summary'])),
'href': '%s?cmd=Issue&amp;issueid=%s&amp;file=%s' % (self.opdsroot, quote_plus(issue['IssueID']),quote_plus(issue['filename'])),
'kind': 'acquisition',
'rel': 'file',
'author': metainfo[0]['writer'],
'image': issue['image'],
'thumbnail': issue['thumbnail'],
}
)
feed = {}
feed['title'] = 'Mylar OPDS - %s' % escape(arcname)
feed['id'] = escape('storyarc:%s' % kwargs['arcid'])
feed['updated'] = mylar.helpers.now()
links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
links.append(getLink(href='%s?cmd=StoryArc&amp;arcid=%s' % (self.opdsroot, quote_plus(kwargs['arcid'])),type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
if len(newarclist) > (index + self.PAGE_SIZE):
links.append(
getLink(href='%s?cmd=StoryArc&amp;arcid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['arcid']),index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
if index >= self.PAGE_SIZE:
links.append(
getLink(href='%s?cmd=StoryArc&amp;arcid=%s&amp;index=%s' % (self.opdsroot, quote_plus(kwargs['arcid']),index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
feed['links'] = links
feed['entries'] = entries
self.data = feed
return
def getLink(href=None, type=None, rel=None, title=None):
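# Build a link dict for the feed template from whichever attributes are supplied.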
link = {}
if href:
link['href'] = href
if type:
link['type'] = type
if rel:
link['rel'] = rel
if title:
link['title'] = title
return link