2019-12-11 03:20:42 +00:00
|
|
|
import os
|
|
|
|
import ast
|
|
|
|
import libs
|
2019-12-16 04:44:30 +00:00
|
|
|
from datetime import timedelta
|
|
|
|
import datetime
|
|
|
|
import pretty
|
2019-12-11 03:20:42 +00:00
|
|
|
|
|
|
|
from get_args import args
|
2019-12-31 19:02:49 +00:00
|
|
|
from config import settings, base_url
|
2019-12-11 03:20:42 +00:00
|
|
|
|
|
|
|
from init import *
|
|
|
|
import logging
|
2019-12-16 04:44:30 +00:00
|
|
|
from database import database
|
2019-12-11 03:20:42 +00:00
|
|
|
from helper import path_replace, path_replace_reverse, path_replace_movie, path_replace_reverse_movie
|
2019-12-15 04:58:51 +00:00
|
|
|
from get_languages import load_language_in_db, alpha2_from_language, alpha3_from_language, language_from_alpha2, \
|
|
|
|
alpha3_from_alpha2
|
2019-12-13 02:59:48 +00:00
|
|
|
|
2019-12-27 22:05:45 +00:00
|
|
|
from flask import Flask, jsonify, request, Response, Blueprint
|
2019-12-13 02:59:48 +00:00
|
|
|
|
2019-12-16 04:44:30 +00:00
|
|
|
from flask_restful import Resource, Api
|
|
|
|
|
2019-12-31 19:02:49 +00:00
|
|
|
# Blueprint hosting every API endpoint under <base_url>/api.
# rstrip('/') prevents a double slash when base_url ends with '/'.
api_bp = Blueprint('api', __name__, url_prefix=base_url.rstrip('/')+'/api')
api = Api(api_bp)
|
2019-12-15 04:58:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
class Badges(Resource):
    def get(self):
        """Return the counters shown as UI badges.

        JSON keys:
            missing_episodes    -- episodes with at least one missing subtitle
            missing_movies      -- movies with at least one missing subtitle
            throttled_providers -- number of currently throttled providers
        """
        result = {
            "missing_episodes": database.execute("SELECT COUNT(*) as count FROM table_episodes WHERE missing_subtitles "
                                                 "is not null AND missing_subtitles != '[]'", only_one=True)['count'],
            "missing_movies": database.execute("SELECT COUNT(*) as count FROM table_movies WHERE missing_subtitles "
                                               "is not null AND missing_subtitles != '[]'", only_one=True)['count'],
            # The setting holds a serialized Python literal (a list); parse it
            # with ast.literal_eval instead of eval() so the config value can
            # never be executed as arbitrary code.
            "throttled_providers": len(ast.literal_eval(str(settings.general.throtteled_providers)))
        }
        return jsonify(result)
|
|
|
|
|
|
|
|
|
2019-12-11 03:20:42 +00:00
|
|
|
class Series(Resource):
    def get(self):
        """Return series rows for the UI, optionally filtered to one series.

        Query parameters: start/length (paging, DataTables style; length -1
        means unlimited), draw (echoed back), id (optional sonarrSeriesId).
        Each row is enriched in place with parsed languages, mapped paths and
        per-series episode counters.
        """
        # Paging parameters; "or" fallbacks cover missing/empty query args.
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        seriesId = request.args.get('id')
        # NOTE(review): row_count is the unfiltered table total even when an
        # id filter is applied — confirm the client expects that.
        row_count = database.execute("SELECT COUNT(*) as count FROM table_shows", only_one=True)['count']
        if seriesId:
            result = database.execute("SELECT * FROM table_shows WHERE sonarrSeriesId=? ORDER BY sortTitle ASC LIMIT ? "
                                      "OFFSET ?", (seriesId, length, start))
        else:
            result = database.execute("SELECT * FROM table_shows ORDER BY sortTitle ASC LIMIT ? OFFSET ?", (length, start))
        for item in result:
            # Parse audio language: expand the plain name into name/code2/code3.
            if item['audio_language']:
                item.update({"audio_language": {"name": item['audio_language'],
                                                "code2": alpha2_from_language(item['audio_language']),
                                                "code3": alpha3_from_language(item['audio_language'])}})

            # Parse desired languages (stored as a serialized list literal;
            # the string 'None' is treated as "no languages").
            if item['languages'] and item['languages'] != 'None':
                item.update({"languages": ast.literal_eval(item['languages'])})
                for i, subs in enumerate(item['languages']):
                    item['languages'][i] = {"name": language_from_alpha2(subs),
                                            "code2": subs,
                                            "code3": alpha3_from_alpha2(subs)}

            # Parse alternate titles (also a serialized literal).
            if item['alternateTitles']:
                item.update({"alternateTitles": ast.literal_eval(item['alternateTitles'])})

            # Provide mapped path (remote path translated to the local mount).
            mapped_path = path_replace(item['path'])
            item.update({"mapped_path": mapped_path})

            # Confirm if path exist (series path is a directory).
            item.update({"exist": os.path.isdir(mapped_path)})

            # Add missing subtitles episode count
            item.update({"episodeMissingCount": database.execute("SELECT COUNT(*) as count FROM table_episodes WHERE "
                                                                 "sonarrSeriesId=? AND missing_subtitles is not null "
                                                                 "AND missing_subtitles != '[]'",
                                                                 (item['sonarrSeriesId'],), only_one=True)['count']})

            # Add episode count
            item.update({"episodeFileCount": database.execute("SELECT COUNT(*) as count FROM table_episodes WHERE "
                                                              "sonarrSeriesId=?", (item['sonarrSeriesId'],),
                                                              only_one=True)['count']})
        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=result)
|
2019-12-11 03:20:42 +00:00
|
|
|
|
|
|
|
|
2019-12-14 17:34:14 +00:00
|
|
|
class Episodes(Resource):
    def get(self):
        """Return every episode of one series, newest season/episode first.

        Query parameters: start/length/draw (DataTables paging, echoed back)
        and id (required sonarrSeriesId). Responds 400 when id is missing.
        """
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        seriesId = request.args.get('id')
        # Validate the required parameter before touching the database: the
        # previous code ran the COUNT query with a None id and only then
        # returned the 400.
        if not seriesId:
            return "Series ID not provided", 400

        row_count = database.execute("SELECT COUNT(*) as count FROM table_episodes WHERE sonarrSeriesId=?",
                                     (seriesId,), only_one=True)['count']
        result = database.execute("SELECT * FROM table_episodes WHERE sonarrSeriesId=? ORDER BY season DESC, "
                                  "episode DESC", (seriesId,))
        for item in result:
            # Parse subtitles: each entry is [code2, path]; replace the code
            # with a name/code2/code3 dict.
            if item['subtitles']:
                item.update({"subtitles": ast.literal_eval(item['subtitles'])})
                for subs in item['subtitles']:
                    subs[0] = {"name": language_from_alpha2(subs[0]),
                               "code2": subs[0],
                               "code3": alpha3_from_alpha2(subs[0])}

            # Parse missing subtitles (serialized list of code2 strings).
            if item['missing_subtitles']:
                item.update({"missing_subtitles": ast.literal_eval(item['missing_subtitles'])})
                for i, subs in enumerate(item['missing_subtitles']):
                    item['missing_subtitles'][i] = {"name": language_from_alpha2(subs),
                                                    "code2": subs,
                                                    "code3": alpha3_from_alpha2(subs)}

            # Provide mapped path (remote path translated to the local mount).
            mapped_path = path_replace(item['path'])
            item.update({"mapped_path": mapped_path})

            # Confirm if path exist (episode path is a file).
            item.update({"exist": os.path.isfile(mapped_path)})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=result)
|
2019-12-14 17:34:14 +00:00
|
|
|
|
|
|
|
|
2019-12-15 04:58:51 +00:00
|
|
|
class Movies(Resource):
    def get(self):
        """Return movie rows for the UI, optionally filtered to one movie.

        Query parameters: start/length/draw (DataTables paging, echoed back)
        and id (optional radarrId). Rows are enriched in place with parsed
        language dicts, alternate titles, failed attempts and mapped paths.
        """
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        moviesId = request.args.get('id')
        row_count = database.execute("SELECT COUNT(*) as count FROM table_movies", only_one=True)['count']
        if moviesId:
            # BUGFIX: parameters were previously passed as two tuples
            # ((length, start), (moviesId,)), which neither matches the
            # placeholder order nor database.execute's signature. All three
            # values go in ONE tuple, ordered as the '?' appear in the SQL.
            result = database.execute("SELECT * FROM table_movies WHERE radarrId=? ORDER BY sortTitle ASC LIMIT ? "
                                      "OFFSET ?", (moviesId, length, start))
        else:
            result = database.execute("SELECT * FROM table_movies ORDER BY sortTitle ASC LIMIT ? OFFSET ?",
                                      (length, start))
        for item in result:
            # Parse audio language: expand the plain name into name/code2/code3.
            if item['audio_language']:
                item.update({"audio_language": {"name": item['audio_language'],
                                                "code2": alpha2_from_language(item['audio_language']),
                                                "code3": alpha3_from_language(item['audio_language'])}})

            # Parse desired languages (serialized list; 'None' string means none).
            if item['languages'] and item['languages'] != 'None':
                item.update({"languages": ast.literal_eval(item['languages'])})
                for i, subs in enumerate(item['languages']):
                    item['languages'][i] = {"name": language_from_alpha2(subs),
                                            "code2": subs,
                                            "code3": alpha3_from_alpha2(subs)}

            # Parse alternate titles (note: movies use 'alternativeTitles',
            # series use 'alternateTitles').
            if item['alternativeTitles']:
                item.update({"alternativeTitles": ast.literal_eval(item['alternativeTitles'])})

            # Parse failed attempts
            if item['failedAttempts']:
                item.update({"failedAttempts": ast.literal_eval(item['failedAttempts'])})

            # Parse subtitles: each entry is [code2, path]; replace the code
            # with a name/code2/code3 dict.
            if item['subtitles']:
                item.update({"subtitles": ast.literal_eval(item['subtitles'])})
                for subs in item['subtitles']:
                    subs[0] = {"name": language_from_alpha2(subs[0]),
                               "code2": subs[0],
                               "code3": alpha3_from_alpha2(subs[0])}

            # Parse missing subtitles (serialized list of code2 strings).
            if item['missing_subtitles']:
                item.update({"missing_subtitles": ast.literal_eval(item['missing_subtitles'])})
                for i, subs in enumerate(item['missing_subtitles']):
                    item['missing_subtitles'][i] = {"name": language_from_alpha2(subs),
                                                    "code2": subs,
                                                    "code3": alpha3_from_alpha2(subs)}

            # Provide mapped path (movie-specific path mapping).
            mapped_path = path_replace_movie(item['path'])
            item.update({"mapped_path": mapped_path})

            # Confirm if path exist (movie path is a file).
            item.update({"exist": os.path.isfile(mapped_path)})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=result)
|
2019-12-15 04:58:51 +00:00
|
|
|
|
|
|
|
|
2019-12-16 04:44:30 +00:00
|
|
|
class HistorySeries(Resource):
    def get(self):
        """Return the episode subtitle history, flagging entries that are
        still eligible for an automatic upgrade.

        Query parameters: start/length/draw (DataTables paging, echoed back).
        """
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        # History rows whose score can still be improved within the upgrade
        # window; empty when subtitle upgrading is disabled.
        upgradable_episodes_not_perfect = []
        if settings.general.getboolean('upgrade_subs'):
            days_to_upgrade_subs = settings.general.days_to_upgrade_subs
            # Unix epoch seconds of the oldest entry still inside the window.
            minimum_timestamp = ((datetime.datetime.now() - timedelta(days=int(days_to_upgrade_subs))) -
                                 datetime.datetime(1970, 1, 1)).total_seconds()

            # NOTE(review): action codes presumably map to download/manual/
            # upgrade events — confirm against the history writer.
            if settings.general.getboolean('upgrade_manual'):
                query_actions = [1, 2, 3]
            else:
                query_actions = [1, 3]

            if settings.sonarr.getboolean('only_monitored'):
                series_monitored_only_query_string = " AND monitored='True'"
            else:
                series_monitored_only_query_string = ''

            # Latest (MAX timestamp) history row per video/language pair.
            # query_actions is an internal int list, so the string join is
            # not an injection risk; user-derived values stay parameterized.
            upgradable_episodes = database.execute(
                "SELECT video_path, MAX(timestamp) as timestamp, score FROM table_history "
                "INNER JOIN table_episodes on table_episodes.sonarrEpisodeId = "
                "table_history.sonarrEpisodeId WHERE action IN (" +
                ','.join(map(str, query_actions)) + ") AND timestamp > ? AND "
                "score is not null" + series_monitored_only_query_string + " GROUP BY "
                "table_history.video_path, table_history.language",
                (minimum_timestamp,))

            for upgradable_episode in upgradable_episodes:
                if upgradable_episode['timestamp'] > minimum_timestamp:
                    # Skip rows whose score is not a parseable integer.
                    try:
                        int(upgradable_episode['score'])
                    except ValueError:
                        pass
                    else:
                        # NOTE(review): 360 appears to be the "perfect"
                        # episode score ceiling — confirm against the scorer.
                        if int(upgradable_episode['score']) < 360:
                            upgradable_episodes_not_perfect.append(upgradable_episode)

        row_count = database.execute("SELECT COUNT(*) as count FROM table_history", only_one=True)['count']
        data = database.execute("SELECT table_history.action, table_shows.title as seriesTitle, "
                                "table_episodes.season || 'x' || table_episodes.episode as episode_number, "
                                "table_episodes.title as episodeTitle, table_history.timestamp, "
                                "table_history.description, table_history.sonarrSeriesId, table_episodes.path, "
                                "table_history.language, table_history.score FROM table_history LEFT JOIN table_shows "
                                "on table_shows.sonarrSeriesId = table_history.sonarrSeriesId LEFT JOIN table_episodes "
                                "on table_episodes.sonarrEpisodeId = table_history.sonarrEpisodeId WHERE "
                                "table_episodes.title is not NULL ORDER BY timestamp DESC LIMIT ? OFFSET ?",
                                (length, start))

        for item in data:
            # Mark episode as upgradable or not: exact dict match against the
            # candidates collected above (keys must align with that query).
            if {"video_path": str(item['path']), "timestamp": float(item['timestamp']), "score": str(item['score'])} in upgradable_episodes_not_perfect:
                item.update({"upgradable": True})
            else:
                item.update({"upgradable": False})

            # Parse language ('code2' or 'code2:forced' format).
            if item['language'] and item['language'] != 'None':
                splitted_language = item['language'].split(':')
                item['language'] = {"name": language_from_alpha2(splitted_language[0]),
                                    "code2": splitted_language[0],
                                    "code3": alpha3_from_alpha2(splitted_language[0]),
                                    "forced": True if len(splitted_language) > 1 else False}

            # Make timestamp pretty (human-readable relative date).
            if item['timestamp']:
                item['timestamp'] = pretty.date(int(item['timestamp']))

            # Provide mapped path
            mapped_path = path_replace(item['path'])
            item.update({"mapped_path": mapped_path})

            # Confirm if path exist
            item.update({"exist": os.path.isfile(mapped_path)})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=data)
|
2019-12-16 04:44:30 +00:00
|
|
|
|
|
|
|
|
|
|
|
class HistoryMovies(Resource):
    def get(self):
        """Return the movie subtitle history, flagging entries that are
        still eligible for an automatic upgrade.

        Query parameters: start/length/draw (DataTables paging, echoed back).
        """
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        # History rows whose score can still be improved within the upgrade
        # window; empty when subtitle upgrading is disabled. (A redundant
        # 'upgradable_movies = []' dead store was removed: the name is only
        # ever used after being assigned from the query below.)
        upgradable_movies_not_perfect = []
        if settings.general.getboolean('upgrade_subs'):
            days_to_upgrade_subs = settings.general.days_to_upgrade_subs
            # Unix epoch seconds of the oldest entry still inside the window.
            minimum_timestamp = ((datetime.datetime.now() - timedelta(days=int(days_to_upgrade_subs))) -
                                 datetime.datetime(1970, 1, 1)).total_seconds()

            if settings.radarr.getboolean('only_monitored'):
                movies_monitored_only_query_string = ' AND table_movies.monitored = "True"'
            else:
                movies_monitored_only_query_string = ""

            if settings.general.getboolean('upgrade_manual'):
                query_actions = [1, 2, 3]
            else:
                query_actions = [1, 3]

            # Latest (MAX timestamp) history row per video/language pair.
            # query_actions is an internal int list, so the string join is
            # not an injection risk; user-derived values stay parameterized.
            upgradable_movies = database.execute(
                "SELECT video_path, MAX(timestamp) as timestamp, score FROM table_history_movie "
                "INNER JOIN table_movies on table_movies.radarrId=table_history_movie.radarrId WHERE action IN (" +
                ','.join(map(str, query_actions)) + ") AND timestamp > ? AND score is not NULL" +
                movies_monitored_only_query_string + " GROUP BY video_path, language", (minimum_timestamp,))

            for upgradable_movie in upgradable_movies:
                if upgradable_movie['timestamp'] > minimum_timestamp:
                    # Skip rows whose score is not a parseable integer.
                    try:
                        int(upgradable_movie['score'])
                    except ValueError:
                        pass
                    else:
                        # NOTE(review): 120 appears to be the "perfect" movie
                        # score ceiling — confirm against the scorer.
                        if int(upgradable_movie['score']) < 120:
                            upgradable_movies_not_perfect.append(upgradable_movie)

        row_count = database.execute("SELECT COUNT(*) as count FROM table_history_movie", only_one=True)['count']
        data = database.execute("SELECT table_history_movie.action, table_movies.title, table_history_movie.timestamp, "
                                "table_history_movie.description, table_history_movie.radarrId, "
                                "table_history_movie.video_path, table_history_movie.language, "
                                "table_history_movie.score FROM table_history_movie LEFT JOIN table_movies on "
                                "table_movies.radarrId = table_history_movie.radarrId ORDER BY timestamp DESC LIMIT ? "
                                "OFFSET ?", (length, start))

        for item in data:
            # Mark movies as upgradable or not: exact dict match against the
            # candidates collected above.
            if {"video_path": str(item['video_path']), "timestamp": float(item['timestamp']), "score": str(item['score'])} in upgradable_movies_not_perfect:
                item.update({"upgradable": True})
            else:
                item.update({"upgradable": False})

            # Parse language ('code2' or 'code2:forced' format).
            if item['language'] and item['language'] != 'None':
                splitted_language = item['language'].split(':')
                item['language'] = {"name": language_from_alpha2(splitted_language[0]),
                                    "code2": splitted_language[0],
                                    "code3": alpha3_from_alpha2(splitted_language[0]),
                                    "forced": True if len(splitted_language) > 1 else False}

            # Make timestamp pretty (human-readable relative date).
            if item['timestamp']:
                item['timestamp'] = pretty.date(int(item['timestamp']))

            # video_path can be NULL for deleted files; keep the payload shape
            # stable either way.
            if item['video_path']:
                # Provide mapped path
                mapped_path = path_replace_movie(item['video_path'])
                item.update({"mapped_path": mapped_path})

                # Confirm if path exist
                item.update({"exist": os.path.isfile(mapped_path)})
            else:
                item.update({"mapped_path": None})
                item.update({"exist": False})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=data)
|
2019-12-16 04:44:30 +00:00
|
|
|
|
|
|
|
|
|
|
|
class WantedSeries(Resource):
    def get(self):
        """Return episodes that still have missing subtitles ("wanted"),
        paged in the DataTables request/response shape."""
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        # Narrow to monitored series when the Sonarr setting asks for it.
        monitored_only_query_string = " AND monitored='True'" if settings.sonarr.getboolean('only_monitored') else ''

        row_count = database.execute("SELECT COUNT(*) as count FROM table_episodes", only_one=True)['count']
        data = database.execute("SELECT table_shows.title as seriesTitle, "
                                "table_episodes.season || 'x' || table_episodes.episode as episode_number, "
                                "table_episodes.title as episodeTitle, table_episodes.missing_subtitles, "
                                "table_episodes.sonarrSeriesId, table_episodes.path, table_shows.hearing_impaired, "
                                "table_episodes.sonarrEpisodeId, table_episodes.scene_name, "
                                "table_episodes.failedAttempts FROM table_episodes INNER JOIN table_shows on "
                                "table_shows.sonarrSeriesId = table_episodes.sonarrSeriesId WHERE "
                                "table_episodes.missing_subtitles != '[]'" + monitored_only_query_string +
                                " ORDER BY table_episodes._rowid_ DESC LIMIT ? OFFSET ?", (length, start))

        for item in data:
            # Expand each serialized missing-subtitles entry ('code2' or
            # 'code2:forced') into a full language dict.
            if item['missing_subtitles']:
                expanded = []
                for raw_entry in ast.literal_eval(item['missing_subtitles']):
                    pieces = raw_entry.split(':')
                    expanded.append({"name": language_from_alpha2(pieces[0]),
                                     "code2": pieces[0],
                                     "code3": alpha3_from_alpha2(pieces[0]),
                                     "forced": len(pieces) > 1})
                item.update({"missing_subtitles": expanded})

            # Translate the stored path to the local mount and verify it.
            local_path = path_replace(item['path'])
            item.update({"mapped_path": local_path})
            item.update({"exist": os.path.isfile(local_path)})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=data)
|
2019-12-16 04:44:30 +00:00
|
|
|
|
|
|
|
|
|
|
|
class WantedMovies(Resource):
    def get(self):
        """Return movies that still have missing subtitles ("wanted"),
        paged in the DataTables request/response shape."""
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        draw = request.args.get('draw')

        # Narrow to monitored movies when the Radarr setting asks for it.
        monitored_only_query_string = " AND monitored='True'" if settings.radarr.getboolean('only_monitored') else ''

        row_count = database.execute("SELECT COUNT(*) as count FROM table_movies", only_one=True)['count']
        data = database.execute("SELECT title, missing_subtitles, radarrId, path, hearing_impaired, sceneName, "
                                "failedAttempts FROM table_movies WHERE missing_subtitles != '[]'" +
                                monitored_only_query_string + " ORDER BY _rowid_ DESC LIMIT ? OFFSET ?",
                                (length, start))

        for item in data:
            # Expand each serialized missing-subtitles entry ('code2' or
            # 'code2:forced') into a full language dict.
            if item['missing_subtitles']:
                expanded = []
                for raw_entry in ast.literal_eval(item['missing_subtitles']):
                    pieces = raw_entry.split(':')
                    expanded.append({"name": language_from_alpha2(pieces[0]),
                                     "code2": pieces[0],
                                     "code3": alpha3_from_alpha2(pieces[0]),
                                     "forced": len(pieces) > 1})
                item.update({"missing_subtitles": expanded})

            # Translate the stored path to the local mount and verify it.
            local_path = path_replace_movie(item['path'])
            item.update({"mapped_path": local_path})
            item.update({"exist": os.path.isfile(local_path)})

        return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=data)
|
2019-12-16 04:44:30 +00:00
|
|
|
|
|
|
|
|
2019-12-28 05:52:00 +00:00
|
|
|
# Route registration: each Resource is mounted under the blueprint's
# /api prefix, so the final URLs are <base_url>/api/<path>.
api.add_resource(Badges, '/badges')
api.add_resource(Series, '/series')
api.add_resource(Episodes, '/episodes')
api.add_resource(Movies, '/movies')
api.add_resource(HistorySeries, '/history_series')
api.add_resource(HistoryMovies, '/history_movies')
api.add_resource(WantedSeries, '/wanted_series')
api.add_resource(WantedMovies, '/wanted_movies')
|