Merge branch 'development' into autonomous

# Conflicts:
#	bazarr/api/episodes/episodes_subtitles.py
#	bazarr/api/movies/movies_subtitles.py
#	bazarr/api/providers/providers_episodes.py
#	bazarr/api/providers/providers_movies.py
#	bazarr/config.py
#	bazarr/database.py
#	bazarr/get_series.py
#	bazarr/get_subtitle.py
#	frontend/src/App/Header.tsx
This commit is contained in:
morpheus65535 2021-12-25 09:21:41 -05:00
commit c7a8e83982
46 changed files with 1498 additions and 1096 deletions

View File

@ -46,6 +46,7 @@ If you need something that is not already part of Bazarr, feel free to create a
* Assrt
* BetaSeries
* BSplayer
* Embedded Subtitles
* GreekSubtitles
* Hosszupuska
* LegendasDivx

View File

@ -6,7 +6,7 @@ from flask import request
from flask_restful import Resource
from subliminal_patch.core import SUBTITLE_EXTENSIONS
from database import TableEpisodes, get_audio_profile_languages
from database import TableEpisodes, get_audio_profile_languages, get_profile_id
from ..utils import authenticate
from get_providers import get_providers, get_providers_auth
from get_subtitle import download_subtitle, manual_upload_subtitle
@ -50,7 +50,8 @@ class EpisodesSubtitles(Resource):
try:
result = download_subtitle(episodePath, language, audio_language, hi, forced, providers_list,
providers_auth, title, 'series')
providers_auth, title, 'series',
profile_id=get_profile_id(episode_id=episodeId))
if result is not None:
message = result[0]
path = result[1]

View File

@ -6,7 +6,7 @@ from flask import request
from flask_restful import Resource
from subliminal_patch.core import SUBTITLE_EXTENSIONS
from database import TableMovies, get_audio_profile_languages
from database import TableMovies, get_audio_profile_languages, get_profile_id
from ..utils import authenticate
from get_providers import get_providers, get_providers_auth
from get_subtitle import download_subtitle, manual_upload_subtitle
@ -53,7 +53,8 @@ class MoviesSubtitles(Resource):
try:
result = download_subtitle(moviePath, language, audio_language, hi, forced, providers_list,
providers_auth, title, 'movie')
providers_auth, title, 'movie',
profile_id=get_profile_id(movie_id=movieId))
if result is not None:
message = result[0]
path = result[1]

View File

@ -3,7 +3,7 @@
from flask import request, jsonify
from flask_restful import Resource
from database import TableEpisodes, TableShows, get_audio_profile_languages
from database import TableEpisodes, TableShows, get_audio_profile_languages, get_profile_id
from get_providers import get_providers, get_providers_auth
from get_subtitle import manual_search, manual_download_subtitle
from utils import history_log
@ -68,7 +68,8 @@ class ProviderEpisodes(Resource):
try:
result = manual_download_subtitle(episodePath, language, audio_language, hi, forced, subtitle,
selected_provider, providers_auth, title, 'series')
selected_provider, providers_auth, title, 'series',
profile_id=get_profile_id(episode_id=sonarrEpisodeId))
if result is not None:
message = result[0]
path = result[1]

View File

@ -3,7 +3,7 @@
from flask import request, jsonify
from flask_restful import Resource
from database import TableMovies, get_audio_profile_languages
from database import TableMovies, get_audio_profile_languages, get_profile_id
from get_providers import get_providers, get_providers_auth
from get_subtitle import manual_search, manual_download_subtitle
from utils import history_log_movie
@ -69,7 +69,8 @@ class ProviderMovies(Resource):
try:
result = manual_download_subtitle(moviePath, language, audio_language, hi, forced, subtitle,
selected_provider, providers_auth, title, 'movie')
selected_provider, providers_auth, title, 'movie',
profile_id=get_profile_id(movie_id=radarrId))
if result is not None:
message = result[0]
path = result[1]

View File

@ -37,4 +37,5 @@ class Searches(Resource):
movies = list(movies)
search_list += movies
return jsonify(search_list)

View File

@ -56,7 +56,9 @@ class SystemSettings(Resource):
TableLanguagesProfiles.update({
TableLanguagesProfiles.name: item['name'],
TableLanguagesProfiles.cutoff: item['cutoff'] if item['cutoff'] != 'null' else None,
TableLanguagesProfiles.items: json.dumps(item['items'])
TableLanguagesProfiles.items: json.dumps(item['items']),
TableLanguagesProfiles.mustContain: item['mustContain'],
TableLanguagesProfiles.mustNotContain: item['mustNotContain'],
})\
.where(TableLanguagesProfiles.profileId == item['profileId'])\
.execute()
@ -67,7 +69,9 @@ class SystemSettings(Resource):
TableLanguagesProfiles.profileId: item['profileId'],
TableLanguagesProfiles.name: item['name'],
TableLanguagesProfiles.cutoff: item['cutoff'] if item['cutoff'] != 'null' else None,
TableLanguagesProfiles.items: json.dumps(item['items'])
TableLanguagesProfiles.items: json.dumps(item['items']),
TableLanguagesProfiles.mustContain: item['mustContain'],
TableLanguagesProfiles.mustNotContain: item['mustNotContain'],
}).execute()
for profileId in existing:
# Unassign this profileId from series and movies

View File

@ -54,6 +54,8 @@ defaults = {
'ignore_vobsub_subs': 'False',
'ignore_ass_subs': 'False',
'adaptive_searching': 'False',
'adaptive_searching_delay': '3w',
'adaptive_searching_delta': '1w',
'enabled_providers': '[]',
'multithreading': 'True',
'chmod_enabled': 'False',
@ -164,7 +166,8 @@ defaults = {
},
'addic7ed': {
'username': '',
'password': ''
'password': '',
'vip': 'False'
},
'podnapisi': {
'verify_ssl': 'True'
@ -211,6 +214,10 @@ defaults = {
'skip_wrong_fps': 'False',
'approved_only': 'False',
'multithreading': 'True'
},
'embeddedsubtitles': {
'include_ass': 'True',
'include_srt': 'True',
}
}

View File

@ -138,6 +138,15 @@ class TableBlacklist(BaseModel):
table_name = 'table_blacklist'
class TableLanguagesProfiles(BaseModel):
cutoff = IntegerField(null=True)
items = TextField()
name = TextField()
profileId = AutoField()
mustContain = TextField(null=True)
mustNotContain = TextField(null=True)
class TableMoviesRootfolder(BaseModel):
accessible = IntegerField(null=True)
error = TextField(null=True)
@ -294,6 +303,8 @@ def init_db():
def migrate_db():
migrate(
migrator.add_column('table_shows', 'monitored', TextField(null=True))
migrator.add_column('table_languages_profiles', 'mustContain', TextField(null=True)),
migrator.add_column('table_languages_profiles', 'mustNotContain', TextField(null=True)),
)
@ -327,10 +338,16 @@ def update_profile_id_list():
profile_id_list = TableLanguagesProfiles.select(TableLanguagesProfiles.profileId,
TableLanguagesProfiles.name,
TableLanguagesProfiles.cutoff,
TableLanguagesProfiles.items).dicts()
TableLanguagesProfiles.items,
TableLanguagesProfiles.mustContain,
TableLanguagesProfiles.mustNotContain).dicts()
profile_id_list = list(profile_id_list)
for profile in profile_id_list:
profile['items'] = json.loads(profile['items'])
profile['mustContain'] = ast.literal_eval(profile['mustContain']) if profile['mustContain'] else \
profile['mustContain']
profile['mustNotContain'] = ast.literal_eval(profile['mustNotContain']) if profile['mustNotContain'] else \
profile['mustNotContain']
def get_profiles_list(profile_id=None):
@ -355,7 +372,7 @@ def get_desired_languages(profile_id):
if profile_id and profile_id != 'null':
for profile in profile_id_list:
profileId, name, cutoff, items = profile.values()
profileId, name, cutoff, items, mustContain, mustNotContain = profile.values()
if profileId == int(profile_id):
languages = [x['language'] for x in items]
break
@ -371,7 +388,7 @@ def get_profile_id_name(profile_id):
if profile_id and profile_id != 'null':
for profile in profile_id_list:
profileId, name, cutoff, items = profile.values()
profileId, name, cutoff, items, mustContain, mustNotContain = profile.values()
if profileId == int(profile_id):
name_from_id = name
break
@ -388,7 +405,7 @@ def get_profile_cutoff(profile_id):
if profile_id and profile_id != 'null':
cutoff_language = []
for profile in profile_id_list:
profileId, name, cutoff, items = profile.values()
profileId, name, cutoff, items, mustContain, mustNotContain = profile.values()
if cutoff:
if profileId == int(profile_id):
for item in items:
@ -431,6 +448,22 @@ def get_audio_profile_languages(series_id=None, episode_id=None, movie_id=None):
return audio_languages
def get_profile_id(series_id=None, episode_id=None, movie_id=None):
    """Return the languages profileId associated with a media item.

    Exactly one of the keyword arguments is expected; precedence is
    series_id, then episode_id, then movie_id.

    @param series_id: Sonarr series id to look up directly in TableShows
    @param episode_id: Sonarr episode id, resolved to its series' profile
    @param movie_id: Radarr movie id to look up in TableMovies
    @return: the profileId value, or None when no id was supplied
    """
    if series_id:
        return TableShows.get(TableShows.sonarrSeriesId == series_id).profileId
    if episode_id:
        # episodes don't carry a profile themselves: join back to the parent series
        row = TableShows.select(TableShows.profileId)\
            .join(TableEpisodes, on=(TableShows.sonarrSeriesId == TableEpisodes.sonarrSeriesId))\
            .where(TableEpisodes.sonarrEpisodeId == episode_id)\
            .get()
        return row.profileId
    if movie_id:
        return TableMovies.get(TableMovies.radarrId == movie_id).profileId
    return None
def convert_list_to_clause(arr: list):
if isinstance(arr, list):
return f"({','.join(str(x) for x in arr)})"

View File

@ -13,6 +13,7 @@ import requests
from get_args import args
from config import settings, get_array_from
from event_handler import event_stream
from utils import get_binary
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ParseResponseError, IPAddressBlocked
from subliminal.providers.opensubtitles import DownloadLimitReached
from subliminal.exceptions import DownloadLimitExceeded, ServiceUnavailable
@ -128,6 +129,7 @@ def get_providers_auth():
'addic7ed': {
'username': settings.addic7ed.username,
'password': settings.addic7ed.password,
'is_vip': settings.addic7ed.getboolean('vip'),
},
'opensubtitles': {
'username': settings.opensubtitles.username,
@ -200,6 +202,13 @@ def get_providers_auth():
'email': settings.ktuvit.email,
'hashed_password': settings.ktuvit.hashed_password,
},
'embeddedsubtitles': {
'include_ass': settings.embeddedsubtitles.getboolean('include_ass'),
'include_srt': settings.embeddedsubtitles.getboolean('include_srt'),
'cache_dir': os.path.join(args.config_dir, "cache"),
'ffprobe_path': get_binary("ffprobe"),
'ffmpeg_path': get_binary("ffmpeg"),
}
}
@ -320,9 +329,11 @@ def get_throttled_providers():
if os.path.exists(os.path.join(args.config_dir, 'config', 'throttled_providers.dat')):
with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'throttled_providers.dat')), 'r') as \
handle:
providers = ast.literal_eval(handle.read())
except (OSError, ValueError):
providers = {}
providers = eval(handle.read())
except:
# set empty content in throttled_providers.dat
logging.error("Invalid content in throttled_providers.dat. Resetting")
set_throttled_providers(providers)
finally:
return providers
@ -332,12 +343,6 @@ def set_throttled_providers(data):
handle.write(data)
try:
tp = get_throttled_providers()
if not isinstance(tp, dict):
raise ValueError('tp should be a dict')
except Exception:
logging.error("Invalid content in throttled_providers.dat. Resetting")
# set empty content in throttled_providers.dat
set_throttled_providers('')
tp = get_throttled_providers()
tp = get_throttled_providers()
if not isinstance(tp, dict):
raise ValueError('tp should be a dict')

View File

@ -71,8 +71,8 @@ def get_video(path, title, providers=None, media_type="movie"):
def download_subtitle(path, language, audio_language, hi, forced, providers, providers_auth, title,
media_type, forced_minimum_score=None, is_upgrade=False):
# TODO: supply all missing languages, not only one, to hit providers only once who support multiple languages in
media_type, forced_minimum_score=None, is_upgrade=False, profile_id=None):
# fixme: supply all missing languages, not only one, to hit providers only once who support multiple languages in
# one query
if settings.general.getboolean('utf8_encode'):
@ -139,6 +139,7 @@ def download_subtitle(path, language, audio_language, hi, forced, providers, pro
compute_score=compute_score,
throttle_time=None, # TODO
blacklist=get_blacklist(media_type=media_type),
ban_list=get_ban_list(profile_id),
throttle_callback=provider_throttle,
score_obj=handler,
pre_download_hook=None, # TODO
@ -332,6 +333,7 @@ def manual_search(path, profileId, providers, providers_auth, title, media_type)
providers=providers,
provider_configs=providers_auth,
blacklist=get_blacklist(media_type=media_type),
ban_list=get_ban_list(profileId),
throttle_callback=provider_throttle,
language_hook=None) # TODO
@ -346,6 +348,7 @@ def manual_search(path, profileId, providers, providers_auth, title, media_type)
providers=['subscene'],
provider_configs=providers_auth,
blacklist=get_blacklist(media_type=media_type),
ban_list=get_ban_list(profileId),
throttle_callback=provider_throttle,
language_hook=None) # TODO
providers_auth['subscene']['only_foreign'] = False
@ -437,7 +440,7 @@ def manual_search(path, profileId, providers, providers_auth, title, media_type)
def manual_download_subtitle(path, language, audio_language, hi, forced, subtitle, provider, providers_auth, title,
media_type):
media_type, profile_id):
logging.debug('BAZARR Manually downloading Subtitles for this file: ' + path)
if settings.general.getboolean('utf8_encode'):
@ -468,6 +471,7 @@ def manual_download_subtitle(path, language, audio_language, hi, forced, subtitl
provider_configs=providers_auth,
pool_class=provider_pool(),
blacklist=get_blacklist(media_type=media_type),
ban_list=get_ban_list(profile_id),
throttle_callback=provider_throttle)
logging.debug('BAZARR Subtitles file downloaded for this file:' + path)
else:
@ -715,12 +719,13 @@ def series_download_subtitles(no):
"ignored because of monitored status, series type or series tags: {}".format(no))
return
providers_list = get_providers()
providers_auth = get_providers_auth()
count_episodes_details = len(episodes_details)
for i, episode in enumerate(episodes_details):
providers_list = get_providers()
if providers_list:
show_progress(id='series_search_progress_{}'.format(no),
header='Searching missing subtitles...',
@ -803,10 +808,11 @@ def episode_download_subtitles(no, send_progress=False):
logging.debug("BAZARR no episode with that episodeId can be found in database:", str(no))
return
providers_list = get_providers()
providers_auth = get_providers_auth()
for episode in episodes_details:
providers_list = get_providers()
if providers_list:
if send_progress:
show_progress(id='episode_search_progress_{}'.format(no),
@ -885,7 +891,6 @@ def movies_download_subtitles(no):
else:
movie = movies[0]
providers_list = get_providers()
providers_auth = get_providers_auth()
if ast.literal_eval(movie['missing_subtitles']):
@ -894,15 +899,17 @@ def movies_download_subtitles(no):
count_movie = 0
for i, language in enumerate(ast.literal_eval(movie['missing_subtitles'])):
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableMovies.select(TableMovies.missing_subtitles)\
.where(TableMovies.movieId == movie['movieId'])\
.dicts()\
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
providers_list = get_providers()
if providers_list:
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableMovies.select(TableMovies.missing_subtitles) \
.where(TableMovies.movieId == movie['movieId']) \
.dicts() \
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
show_progress(id='movie_search_progress_{}'.format(no),
header='Searching missing subtitles...',
name=movie['title'],
@ -962,76 +969,70 @@ def wanted_download_subtitles(episode_id):
.dicts()
episodes_details = list(episodes_details)
providers_list = get_providers()
providers_auth = get_providers_auth()
for episode in episodes_details:
attempt = episode['failedAttempts']
if type(attempt) == str:
attempt = ast.literal_eval(attempt)
for language in ast.literal_eval(episode['missing_subtitles']):
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableEpisodes.select(TableEpisodes.missing_subtitles) \
.where(TableEpisodes.episodeId == episode['episodeId']) \
.dicts() \
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
providers_list = get_providers()
if attempt is None:
attempt = []
attempt.append([language, time.time()])
else:
att = list(zip(*attempt))[0]
if language not in att:
attempt.append([language, time.time()])
if providers_list:
for language in ast.literal_eval(episode['missing_subtitles']):
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableEpisodes.select(TableEpisodes.missing_subtitles) \
.where(TableEpisodes.episodeId == episode['episodeId']) \
.dicts() \
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
TableEpisodes.update({TableEpisodes.failedAttempts: str(attempt)})\
.where(TableEpisodes.episodeId == episode['episodeId'])\
.execute()
if is_search_active(desired_language=language, attempt_string=episode['failedAttempts']):
TableEpisodes.update({TableEpisodes.failedAttempts:
updateFailedAttempts(desired_language=language,
attempt_string=episode['failedAttempts'])}) \
.where(TableEpisodes.episodeId == episode['episodeId']) \
.execute()
for i in range(len(attempt)):
if attempt[i][0] == language:
if search_active(attempt[i][1]):
audio_language_list = get_audio_profile_languages(episode_id=episode['episodeId'])
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
audio_language = 'None'
result = download_subtitle(episode['path'],
language.split(':')[0],
audio_language,
"True" if language.endswith(':hi') else "False",
"True" if language.endswith(':forced') else "False",
providers_list,
providers_auth,
episode['title'],
'series')
if result is not None:
message = result[0]
path = result[1]
forced = result[5]
if result[8]:
language_code = result[2] + ":hi"
elif forced:
language_code = result[2] + ":forced"
else:
language_code = result[2]
provider = result[3]
score = result[4]
subs_id = result[6]
subs_path = result[7]
store_subtitles(episode['path'])
history_log(1, episode['seriesId'], episode['episodeId'], message, path,
language_code, provider, score, subs_id, subs_path)
event_stream(type='series', action='update', payload=episode['seriesId'])
event_stream(type='episode-wanted', action='delete', payload=episode['episodeId'])
send_notifications(episode['seriesId'], episode['episodeId'], message)
audio_language_list = get_audio_profile_languages(episode_id=episode['episodeId'])
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
logging.debug(
'BAZARR Search is not active for episode ' + episode['path'] + ' Language: ' + attempt[i][
0])
audio_language = 'None'
result = download_subtitle(episode['path'],
language.split(':')[0],
audio_language,
"True" if language.endswith(':hi') else "False",
"True" if language.endswith(':forced') else "False",
providers_list,
providers_auth,
episode['title'],
'series')
if result is not None:
message = result[0]
path = result[1]
forced = result[5]
if result[8]:
language_code = result[2] + ":hi"
elif forced:
language_code = result[2] + ":forced"
else:
language_code = result[2]
provider = result[3]
score = result[4]
subs_id = result[6]
subs_path = result[7]
store_subtitles(episode['path'])
history_log(1, episode['seriesId'], episode['episodeId'], message, path,
language_code, provider, score, subs_id, subs_path)
event_stream(type='series', action='update', payload=episode['seriesId'])
event_stream(type='episode-wanted', action='delete', payload=episode['episodeId'])
send_notifications(episode['seriesId'], episode['episodeId'], message)
else:
logging.debug(
f"BAZARR Search is throttled by adaptive search for this episode {episode['path']} and "
f"language: {language}")
else:
logging.info("BAZARR All providers are throttled")
break
def wanted_download_subtitles_movie(movie_id):
@ -1045,75 +1046,68 @@ def wanted_download_subtitles_movie(movie_id):
.dicts()
movies_details = list(movies_details)
providers_list = get_providers()
providers_auth = get_providers_auth()
for movie in movies_details:
attempt = movie['failedAttempts']
if type(attempt) == str:
attempt = ast.literal_eval(attempt)
for language in ast.literal_eval(movie['missing_subtitles']):
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableMovies.select(TableMovies.missing_subtitles) \
.where(TableMovies.movieId == movie['movieId']) \
.dicts() \
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
providers_list = get_providers()
if attempt is None:
attempt = []
attempt.append([language, time.time()])
else:
att = list(zip(*attempt))[0]
if language not in att:
attempt.append([language, time.time()])
if providers_list:
for language in ast.literal_eval(movie['missing_subtitles']):
# confirm if language is still missing or if cutoff have been reached
confirmed_missing_subs = TableMovies.select(TableMovies.missing_subtitles) \
.where(TableMovies.movieId == movie['movieId']) \
.dicts() \
.get()
if language not in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):
continue
TableMovies.update({TableMovies.failedAttempts: str(attempt)})\
.where(TableMovies.movieId == movie['movieId'])\
.execute()
if is_search_active(desired_language=language, attempt_string=movie['failedAttempts']):
TableMovies.update({TableMovies.failedAttempts:
updateFailedAttempts(desired_language=language,
attempt_string=movie['failedAttempts'])}) \
.where(TableMovies.movieId == movie['movieId']) \
.execute()
for i in range(len(attempt)):
if attempt[i][0] == language:
if search_active(attempt[i][1]) is True:
audio_language_list = get_audio_profile_languages(movie_id=movie['movieId'])
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
audio_language = 'None'
result = download_subtitle(movie['path'],
language.split(':')[0],
audio_language,
"True" if language.endswith(':hi') else "False",
"True" if language.endswith(':forced') else "False",
providers_list,
providers_auth,
movie['title'],
'movie')
if result is not None:
message = result[0]
path = result[1]
forced = result[5]
if result[8]:
language_code = result[2] + ":hi"
elif forced:
language_code = result[2] + ":forced"
else:
language_code = result[2]
provider = result[3]
score = result[4]
subs_id = result[6]
subs_path = result[7]
store_subtitles_movie(movie['path'])
history_log_movie(1, movie['movieId'], message, path, language_code, provider, score,
subs_id, subs_path)
event_stream(type='movie-wanted', action='delete', payload=movie['movieId'])
send_notifications_movie(movie['movieId'], message)
audio_language_list = get_audio_profile_languages(movie_id=movie['movieId'])
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
logging.info(
'BAZARR Search is not active for this Movie ' + movie['path'] + ' Language: ' + attempt[i][
0])
audio_language = 'None'
result = download_subtitle(movie['path'],
language.split(':')[0],
audio_language,
"True" if language.endswith(':hi') else "False",
"True" if language.endswith(':forced') else "False",
providers_list,
providers_auth,
movie['title'],
'movie')
if result is not None:
message = result[0]
path = result[1]
forced = result[5]
if result[8]:
language_code = result[2] + ":hi"
elif forced:
language_code = result[2] + ":forced"
else:
language_code = result[2]
provider = result[3]
score = result[4]
subs_id = result[6]
subs_path = result[7]
store_subtitles_movie(movie['path'])
history_log_movie(1, movie['movieId'], message, path, language_code, provider, score,
subs_id, subs_path)
event_stream(type='movie-wanted', action='delete', payload=movie['movieId'])
send_notifications_movie(movie['movieId'], message)
else:
logging.info(f"BAZARR Search is throttled by adaptive search for this episode {movie['path']} and "
f"language: {language}")
else:
logging.info("BAZARR All providers are throttled")
break
def wanted_search_missing_subtitles_series():
@ -1187,25 +1181,6 @@ def wanted_search_missing_subtitles_movies():
logging.info('BAZARR Finished searching for missing Movies Subtitles. Check History for more information.')
def search_active(timestamp):
if settings.general.getboolean('adaptive_searching'):
search_deadline = timedelta(weeks=3)
search_delta = timedelta(weeks=1)
aa = datetime.fromtimestamp(float(timestamp))
attempt_datetime = datetime.strptime(str(aa).split(".")[0], '%Y-%m-%d %H:%M:%S')
attempt_search_deadline = attempt_datetime + search_deadline
today = datetime.today()
attempt_age_in_days = (today.date() - attempt_search_deadline.date()).days
if today.date() <= attempt_search_deadline.date():
return True
elif attempt_age_in_days % search_delta.days == 0:
return True
else:
return False
else:
return True
def convert_to_guessit(guessit_key, attr_from_db):
try:
return guessit(attr_from_db)[guessit_key]
@ -1441,11 +1416,12 @@ def upgrade_subtitles():
count_movie_to_upgrade = len(movies_to_upgrade)
providers_list = get_providers()
providers_auth = get_providers_auth()
if settings.general.getboolean('use_series'):
for i, episode in enumerate(episodes_to_upgrade):
providers_list = get_providers()
show_progress(id='upgrade_episodes_progress',
header='Upgrading episodes subtitles...',
name='{0} - S{1:02d}E{2:02d} - {3}'.format(episode['seriesTitle'],
@ -1455,8 +1431,7 @@ def upgrade_subtitles():
value=i,
count=count_episode_to_upgrade)
providers = get_providers()
if not providers:
if not providers_list:
logging.info("BAZARR All providers are throttled")
return
if episode['language'].endswith('forced'):
@ -1512,17 +1487,15 @@ def upgrade_subtitles():
if settings.general.getboolean('use_movies'):
for i, movie in enumerate(movies_to_upgrade):
providers_list = get_providers()
show_progress(id='upgrade_movies_progress',
header='Upgrading movies subtitles...',
name=movie['title'],
value=i,
count=count_movie_to_upgrade)
providers = get_providers()
if not providers:
logging.info("BAZARR All providers are throttled")
return
if not providers:
if not providers_list:
logging.info("BAZARR All providers are throttled")
return
if movie['language'].endswith('forced'):
@ -1656,3 +1629,154 @@ def _get_scores(media_type, min_movie=None, min_ep=None):
min_ep = min_ep or (240 * 100 / handler.max_score)
min_score_ = int(min_ep if series else min_movie)
return handler.get_scores(min_score_)
def get_ban_list(profile_id):
    """Build the provider ban list for a languages profile.

    @param profile_id: languages profile id (falsy values short-circuit to None)
    @return: dict with 'must_contain' and 'must_not_contain' lists taken from
             the profile (empty lists when unset), or None when no profile_id
             was given or the profile could not be found
    """
    if not profile_id:
        return None
    profile = get_profiles_list(profile_id)
    if not profile:
        return None
    return {
        'must_contain': profile['mustContain'] or [],
        'must_not_contain': profile['mustNotContain'] or [],
    }
def _parse_adaptive_interval(value):
    """Parse an adaptive searching interval string into a timedelta.

    Accepts values like '3w' (weeks) or '10d' (days). The whole numeric
    prefix is used (value[:-1]), so multi-digit settings work. Returns
    None when the value cannot be parsed.
    """
    try:
        if value.endswith('d'):
            return timedelta(days=int(value[:-1]))
        elif value.endswith('w'):
            return timedelta(weeks=int(value[:-1]))
    except (AttributeError, ValueError):
        pass
    return None


def is_search_active(desired_language, attempt_string):
    """
    Function to test if it's time to search again after a previous attempt matching the desired language. While we
    are within adaptive_searching_delay of the initial attempt, we always search; after that delay, we only search
    again once adaptive_searching_delta has elapsed since the latest attempt.
    @param desired_language: 2 letters language to search for in attempts
    @type desired_language: str
    @param attempt_string: string representation of a list of lists from database column failedAttempts
    @type attempt_string: str
    @return: return True if it's time to search again and False if not
    @rtype: bool
    """
    if not settings.general.getboolean('adaptive_searching'):
        logging.debug("adaptive searching is disabled, search will run.")
        return True

    logging.debug("Adaptive searching is enabled, we'll see if it's time to search again...")
    try:
        # let's try to get a list of lists from the string representation in database
        attempts = ast.literal_eval(attempt_string)
        if type(attempts) is not list:
            # attempts should be a list; if not, it's malformed or None
            raise ValueError
    except (ValueError, SyntaxError):
        # literal_eval raises SyntaxError (not ValueError) on truncated/garbled strings;
        # treat both the same way as a failsafe
        logging.debug("Adaptive searching: attempts is malformed. As a failsafe, search will run.")
        return True

    if not attempts:
        logging.debug("Adaptive searching: attempts list is empty, search will run.")
        return True

    # get attempts matching the desired language and sort them by timestamp ascending
    matching_attempts = sorted([x for x in attempts if x[0] == desired_language], key=lambda x: x[1])

    if not matching_attempts:
        logging.debug("Adaptive searching: there's no attempts matching desired language, search will run.")
        return True
    logging.debug(f"Adaptive searching: attempts matching language {desired_language}: {matching_attempts}")

    # try to get the initial and latest search timestamp from matching attempts
    initial_search_attempt = matching_attempts[0]
    latest_search_attempt = matching_attempts[-1]

    # try to parse the timestamps for those attempts
    try:
        initial_search_timestamp = datetime.fromtimestamp(initial_search_attempt[1])
        latest_search_timestamp = datetime.fromtimestamp(latest_search_attempt[1])
    except (OverflowError, ValueError, OSError):
        logging.debug("Adaptive searching: unable to parse initial and latest search timestamps, search will run.")
        return True
    logging.debug(f"Adaptive searching: initial search date for {desired_language} is "
                  f"{initial_search_timestamp}")
    logging.debug(f"Adaptive searching: latest search date for {desired_language} is {latest_search_timestamp}")

    # defining basic calculation variables
    now = datetime.now()

    # fix: parse the whole numeric prefix (value[:-1]) instead of only the first character (value[:1]),
    # so multi-digit settings like '10d' are honoured
    extended_search_delay = _parse_adaptive_interval(settings.general.adaptive_searching_delay)
    if extended_search_delay is None:
        logging.debug(f"Adaptive searching: cannot parse adaptive_searching_delay from config file: "
                      f"{settings.general.adaptive_searching_delay}")
        return True
    logging.debug(f"Adaptive searching: delay after initial search value: {extended_search_delay}")

    extended_search_delta = _parse_adaptive_interval(settings.general.adaptive_searching_delta)
    if extended_search_delta is None:
        logging.debug(f"Adaptive searching: cannot parse adaptive_searching_delta from config file: "
                      f"{settings.general.adaptive_searching_delta}")
        return True
    logging.debug(f"Adaptive searching: delta between latest search and now value: {extended_search_delta}")

    if initial_search_timestamp + extended_search_delay > now:
        logging.debug(f"Adaptive searching: it's been less than {settings.general.adaptive_searching_delay} since "
                      f"initial search, search will run.")
        return True

    logging.debug(f"Adaptive searching: it's been more than {settings.general.adaptive_searching_delay} since "
                  f"initial search, let's check if it's time to search again.")
    if latest_search_timestamp + extended_search_delta <= now:
        logging.debug(f"Adaptive searching: it's been more than {settings.general.adaptive_searching_delta} since "
                      f"latest search, search will run.")
        return True

    logging.debug(f"Adaptive searching: it's been less than {settings.general.adaptive_searching_delta} since "
                  f"latest search, we're not ready to search yet.")
    return False
def updateFailedAttempts(desired_language, attempt_string):
    """
    Function to parse attempts and make sure we only keep initial and latest search timestamp for each language.
    @param desired_language: 2 letters language to search for in attempts
    @type desired_language: str
    @param attempt_string: string representation of a list of lists from database column failedAttempts
    @type attempt_string: str
    @return: return a string representation of a list of lists like [str(language_code), str(attempts)]
    @rtype: str
    """
    try:
        # parse the string representation stored in database into a list of lists
        parsed = ast.literal_eval(attempt_string)
        logging.debug(f"Adaptive searching: current attempts value is {parsed}")
        if type(parsed) is not list:
            # anything that isn't a list is malformed (or None)
            raise ValueError
    except ValueError:
        logging.debug("Adaptive searching: failed to parse attempts value, we'll use an empty list.")
        parsed = []

    by_timestamp = lambda entry: entry[1]
    same_language = sorted([entry for entry in parsed if entry[0] == desired_language], key=by_timestamp)
    logging.debug(f"Adaptive searching: attempts matching language {desired_language}: {same_language}")
    kept = sorted([entry for entry in parsed if entry[0] != desired_language], key=by_timestamp)
    logging.debug(f"Adaptive searching: attempts not matching language {desired_language}: {kept}")

    # keep only the very first attempt for the desired language, if there was one
    if same_language:
        kept.append(same_language[0])

    # record the current attempt with language and timestamp
    kept.append([desired_language, datetime.timestamp(datetime.now())])

    updated_attempts = sorted(kept, key=lambda entry: entry[0])
    logging.debug(f"Adaptive searching: updated attempts that will be saved to database is {updated_attempts}")
    return str(updated_attempts)

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,6 @@
"bootstrap": "^4",
"lodash": "^4",
"moment": "^2.29.1",
"package.json": "^2.0.1",
"rc-slider": "^9.7",
"react": "^17",
"react-bootstrap": "^1",

View File

@ -33,6 +33,8 @@ declare namespace Language {
profileId: number;
cutoff: number | null;
items: ProfileItem[];
mustContain: string[];
mustNotContain: string[];
}
}

View File

@ -28,6 +28,8 @@ interface Settings {
declare namespace Settings {
interface General {
adaptive_searching: boolean;
adaptive_searching_delay: string;
adaptive_searching_delta: string;
anti_captcha_provider?: string;
auto_update: boolean;
base_url?: string;

View File

@ -5,6 +5,7 @@ import {
faUser,
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { uniqueId } from "lodash";
import React, { FunctionComponent, useMemo } from "react";
import {
Button,
@ -35,16 +36,22 @@ async function SearchItem(text: string) {
return results.map<SearchResult>((v) => {
let link: string;
let id: string;
if (v.seriesId) {
link = `/series/${v.seriesId}`;
id = `series-${v.seriesId}`;
} else if (v.movieId) {
link = `/movies/${v.movieId}`;
id = `movie-${v.movieId}`;
} else {
link = "";
id = uniqueId("unknown");
}
return {
name: `${v.title} (${v.year})`,
link,
id,
};
});
}

View File

@ -13,6 +13,7 @@ import {
ActionButton,
BaseModal,
BaseModalProps,
Chips,
LanguageSelector,
Selector,
SimpleTable,
@ -31,6 +32,8 @@ function createDefaultProfile(): Language.Profile {
name: "",
items: [],
cutoff: null,
mustContain: [],
mustNotContain: [],
};
}
@ -260,6 +263,28 @@ const LanguagesProfileModal: FunctionComponent<Props & BaseModalProps> = (
></Selector>
<Message>Ignore others if existing</Message>
</Input>
<Input name="Release info must contain">
<Chips
value={current.mustContain}
onChange={(mc) => updateProfile("mustContain", mc)}
></Chips>
<Message>
Subtitles release info must include one of those words or they will be
excluded from search results (regex supported).
</Message>
</Input>
<Input name="Release info must not contain">
<Chips
value={current.mustNotContain}
onChange={(mnc: string[]) => {
updateProfile("mustNotContain", mnc);
}}
></Chips>
<Message>
Subtitles release info including one of those words (case insensitive)
will be excluded from search results (regex supported).
</Message>
</Input>
</BaseModal>
);
};

View File

@ -94,6 +94,40 @@ const Table: FunctionComponent = () => {
});
},
},
{
Header: "Must contain",
accessor: "mustContain",
Cell: (row) => {
const items = row.value;
if (!items) {
return false;
}
return items.map((v) => {
return (
<Badge className={"mx-1"} variant={"secondary"}>
{v}
</Badge>
);
});
},
},
{
Header: "Must not contain",
accessor: "mustNotContain",
Cell: (row) => {
const items = row.value;
if (!items) {
return false;
}
return items.map((v) => {
return (
<Badge className={"mx-1"} variant={"secondary"}>
{v}
</Badge>
);
});
},
},
{
accessor: "profileId",
Cell: ({ row, update }) => {
@ -138,6 +172,8 @@ const Table: FunctionComponent = () => {
name: "",
items: [],
cutoff: null,
mustContain: [],
mustNotContain: [],
};
showModal("profile", profile);
}}

View File

@ -21,6 +21,10 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
defaultKey: {
username: "",
password: "",
vip: false,
},
keyNameOverride: {
vip: "VIP",
},
},
{ key: "argenteam", description: "LATAM Spanish Subtitles Provider" },
@ -46,6 +50,21 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
key: "bsplayer",
name: "BSplayer",
},
{
key: "embeddedsubtitles",
name: "Embedded Subtitles",
description: "Embedded Subtitles from your Media Files",
defaultKey: {
include_srt: true,
include_ass: true,
},
message:
"Warning for cloud users: this provider needs to read the entire file in order to extract subtitles.",
keyNameOverride: {
include_srt: "Include SRT",
include_ass: "Include ASS (will be converted to SRT)",
},
},
{
key: "greeksubs",
name: "GreekSubs",

View File

@ -10,7 +10,13 @@ import {
Slider,
Text,
} from "../components";
import { antiCaptchaOption, colorOptions, folderOptions } from "./options";
import {
adaptiveSearchingDelayOption,
adaptiveSearchingDeltaOption,
antiCaptchaOption,
colorOptions,
folderOptions,
} from "./options";
const subzeroOverride = (key: string) => {
return (settings: Settings) => {
@ -124,16 +130,44 @@ const SettingsSubtitlesView: FunctionComponent = () => {
</CollapseBox>
</Group>
<Group header="Performance / Optimization">
<Input>
<Check
label="Adaptive Searching"
settingKey="settings-general-adaptive_searching"
></Check>
<Message>
When searching for subtitles, Bazarr will search less frequently to
limit call to providers.
</Message>
</Input>
<CollapseBox>
<CollapseBox.Control>
<Input>
<Check
label="Adaptive Searching"
settingKey="settings-general-adaptive_searching"
></Check>
<Message>
When searching for subtitles, Bazarr will reduce search
frequency to limit call to providers.
</Message>
</Input>
</CollapseBox.Control>
<CollapseBox.Content>
<Input>
<Selector
settingKey="settings-general-adaptive_searching_delay"
beforeStaged={(v) => (v === undefined ? "3w" : v)}
options={adaptiveSearchingDelayOption}
></Selector>
<Message>
How much weeks must Bazarr wait after initial search to reduce
search frequency.
</Message>
</Input>
<Input>
<Selector
settingKey="settings-general-adaptive_searching_delta"
beforeStaged={(v) => (v === undefined ? "1w" : v)}
options={adaptiveSearchingDeltaOption}
></Selector>
<Message>
How often should Bazarr search for subtitles when in adaptive
search mode.
</Message>
</Input>
</CollapseBox.Content>
</CollapseBox>
<Input>
<Check
label="Search Enabled Providers Simultaneously"

View File

@ -24,6 +24,48 @@ export const antiCaptchaOption: SelectorOption<string>[] = [
},
];
export const adaptiveSearchingDelayOption: SelectorOption<string>[] = [
{
label: "1 week",
value: "1w",
},
{
label: "2 weeks",
value: "2w",
},
{
label: "3 weeks",
value: "3w",
},
{
label: "4 weeks",
value: "4w",
},
];
export const adaptiveSearchingDeltaOption: SelectorOption<string>[] = [
{
label: "3 days",
value: "3d",
},
{
label: "1 week",
value: "1w",
},
{
label: "2 weeks",
value: "2w",
},
{
label: "3 weeks",
value: "3w",
},
{
label: "4 weeks",
value: "4w",
},
];
function buildColor(name: string) {
return `color(name=${name})`;
}

View File

@ -176,11 +176,11 @@ export const Chips: FunctionComponent<ChipsProp> = (props) => {
const update = useSingleUpdate();
const defaultValue = useLatest<string[]>(settingKey, isArray, override);
const value = useLatest<string[]>(settingKey, isArray, override);
return (
<CChips
defaultValue={defaultValue ?? undefined}
value={value ?? undefined}
onChange={(v) => {
update(v, settingKey);
}}

View File

@ -10,6 +10,7 @@ import { useHistory } from "react-router";
import { useThrottle } from "rooks";
export interface SearchResult {
id: string;
name: string;
link?: string;
}
@ -58,7 +59,7 @@ export const SearchBar: FunctionComponent<Props> = ({
const items = useMemo(() => {
const its = results.map((v) => (
<Dropdown.Item
key={v.name}
key={v.id}
eventKey={v.link}
disabled={v.link === undefined}
>

View File

@ -3,6 +3,7 @@ import React, {
FunctionComponent,
KeyboardEvent,
useCallback,
useEffect,
useMemo,
useRef,
useState,
@ -14,15 +15,31 @@ const SplitKeys = ["Tab", "Enter", " ", ",", ";"];
export interface ChipsProps {
disabled?: boolean;
defaultValue?: readonly string[];
value?: readonly string[];
onChange?: (v: string[]) => void;
}
export const Chips: FunctionComponent<ChipsProps> = ({
defaultValue,
value,
disabled,
onChange,
}) => {
const [chips, setChips] = useState(defaultValue ?? []);
const [chips, setChips] = useState<Readonly<string[]>>(() => {
if (value) {
return value;
}
if (defaultValue) {
return defaultValue;
}
return [];
});
useEffect(() => {
if (value) {
setChips(value);
}
}, [value]);
const input = useRef<HTMLInputElement>(null);

401
libs/fese/__init__.py Executable file
View File

@ -0,0 +1,401 @@
# -*- coding: utf-8 -*-
# License: GPL
from __future__ import annotations
import json
import logging
import os
import re
import subprocess
from typing import List, Optional
from babelfish import Language
from babelfish.exceptions import LanguageError
import pysubs2
__version__ = "0.1.0"

logger = logging.getLogger(__name__)

# Paths to executables; overridable through the environment so callers can
# point at a bundled or non-PATH ffmpeg/ffprobe build.
FFPROBE_PATH = os.environ.get("FFPROBE_PATH", "ffprobe")
FFMPEG_PATH = os.environ.get("FFMPEG_PATH", "ffmpeg")

# When True, "-stats" is appended to the ffmpeg extraction command line.
FFMPEG_STATS = True
# Log level passed to both ffprobe and ffmpeg via "-v".
FF_LOG_LEVEL = "quiet"
class FeseError(Exception):
    """Base exception for the fese package."""
    pass
class ExtractionError(FeseError):
    """Raised when the ffmpeg subtitle-extraction call fails."""
    pass
class InvalidFile(FeseError):
    """Raised by check_integrity() when a subtitle file is unsupported,
    unreadable or inconsistent with its source stream."""
    pass
class InvalidStream(FeseError):
    """Error for invalid subtitle streams.

    NOTE(review): not raised anywhere in this module; presumably part of the
    public API for consumers — confirm before removing."""
    pass
class InvalidSource(FeseError):
    """Raised when ffprobe cannot read or parse information from a media file."""
    pass
class ConversionError(FeseError):
    """Raised by to_srt() when a subtitle cannot be converted to SubRip."""
    pass
class LanguageNotFound(FeseError):
    """Raised when a subtitle stream's language cannot be detected from its tags."""
    pass
# Extensions known to the integrity check below (see check_integrity)
SRT = "srt"
ASS = "ass"
class FFprobeSubtitleDisposition:
    """Disposition flags (forced, SDH, karaoke, ...) of a subtitle stream.

    Flags come from ffprobe's "disposition" dict and can be refined from the
    stream's title tag via update_from_tags().
    """

    def __init__(self, data: dict):
        # Start every known flag off, then turn on whatever ffprobe reported.
        for flag in (
            "default",
            "generic",
            "dub",
            "original",
            "comment",
            "lyrics",
            "karaoke",
            "forced",
            "hearing_impaired",
            "visual_impaired",
            "clean_effects",
            "attached_pic",
            "timed_thumbnails",
        ):
            setattr(self, flag, False)

        self._content_type = None

        # Unknown keys in the ffprobe dict are silently ignored.
        for key, val in data.items():
            if hasattr(self, key):
                setattr(self, key, bool(val))

    def update_from_tags(self, tags):
        """Classify the stream from its title tag, falling back to generic."""
        tag_title = tags.get("title")
        if tag_title is None:
            logger.debug("Title not found. Marking as generic")
            self.generic = True
            return None

        lowered = tag_title.lower()
        for name, pattern in _content_types.items():
            if pattern.search(lowered) is None:
                continue
            logger.debug("Found %s: %s", name, lowered)
            self._content_type = name
            setattr(self, name, True)
            return None

        logger.debug("Generic disposition title found: %s", lowered)
        self.generic = True
        return None

    @property
    def suffix(self):
        """Filename suffix for the detected content type, or "" when generic."""
        if self._content_type is None:
            return ""
        return f"-{self._content_type}"

    def __str__(self):
        label = self.suffix.lstrip("-").upper()
        return label or "GENERIC"
class FFprobeSubtitleStream:
    """Base class for FFprobe (FFmpeg) extractable subtitle streams."""

    def __init__(self, stream: dict):
        """Build the stream from an ffprobe stream dict.

        :raises: LanguageNotFound
        """
        self.index = int(stream.get("index", 0))
        self.codec_name = stream.get("codec_name", "Unknown")
        self.extension = _subtitle_extensions.get(self.codec_name, self.codec_name)
        self.r_frame_rate = stream.get("r_frame_rate")
        self.avg_frame_rate = stream.get("avg_frame_rate")
        self.time_base = stream.get("time_base")
        self.tags = stream.get("tags", {})

        # Numeric timing fields default to zero when ffprobe omits them.
        for attr, cast in (
            ("duration", float),
            ("start_time", float),
            ("duration_ts", int),
            ("start_pts", int),
        ):
            setattr(self, attr, cast(stream.get(attr, 0)))

        self.disposition = FFprobeSubtitleDisposition(stream.get("disposition", {}))
        if self.tags:
            self.disposition.update_from_tags(self.tags)

        self.language: Language = self._language()

    @property
    def suffix(self):
        """Filename suffix: language, optional country and disposition, extension."""
        country = self.language.country
        lang = self.language.alpha2
        if country is not None:
            lang = f"{lang}-{country}"
        return f"{lang}{self.disposition.suffix}.{self.extension}"

    def _language(self) -> Language:
        code = self.tags.get("language")
        if code is not None:
            # Some tags (spa/por) are promoted to a language+country pair when
            # the title hints at a regional variant.
            extra = _extra_languages.get(code)
            if extra is not None:
                title = self.tags.get("title", "n/a").lower()
                if any(hint in title for hint in extra["matches"]):
                    logger.debug("Found extra language %s", extra["language_args"])
                    return Language(*extra["language_args"])
            try:
                return Language.fromalpha3b(code)
            except LanguageError as error:
                logger.debug("Error with '%s' language: %s", code, error)

        raise LanguageNotFound(f"Couldn't detect language for stream: {self.tags}")

    def __repr__(self) -> str:
        codec = self.codec_name.upper()
        return f"<{codec}: {self.language}@{self.disposition}>"
# Helpers
class FFprobeVideoContainer:
    """Wrapper around a media file that lists and extracts its embedded
    subtitle streams via ffprobe/ffmpeg."""

    def __init__(self, path: str):
        self.path = path

    @property
    def extension(self):
        """Container file extension without the leading dot ("" if none)."""
        return os.path.splitext(self.path)[-1].lstrip(".")

    def get_subtitles(self, timeout: int = 600) -> List[FFprobeSubtitleStream]:
        """Factory function to create subtitle instances from FFprobe.

        :param timeout: subprocess timeout in seconds (default: 600)
        :raises: InvalidSource"""
        ff_command = [
            FFPROBE_PATH,
            "-v",
            FF_LOG_LEVEL,
            "-print_format",
            "json",
            "-show_format",
            "-show_streams",
            self.path,
        ]
        try:
            result = subprocess.run(
                ff_command, stdout=subprocess.PIPE, check=True, timeout=timeout
            )
            streams = json.loads(result.stdout)["streams"]
        except _ffprobe_exceptions as error:
            raise InvalidSource(
                f"{error} trying to get information from {self.path}"
            ) from error  # We want to see the traceback

        subs = []
        for stream in streams:
            # Skip video/audio/attachment streams.
            if stream.get("codec_type", "n/a") != "subtitle":
                continue
            try:
                subs.append(FFprobeSubtitleStream(stream))
            except LanguageNotFound:
                # Streams without a detectable language are silently dropped.
                pass

        if not subs:
            logger.debug("Source doesn't have any subtitle valid streams")
            return []

        logger.debug("Found subtitle streams: %s", subs)
        return subs

    def extract_subtitles(
        self,
        subtitles: List[FFprobeSubtitleStream],
        custom_dir=None,
        overwrite=True,
        timeout=600,
    ):
        """Extracts a list of subtitles. Returns a dictionary of the extracted
        filenames by index.

        :param subtitles: a list of FFprobeSubtitle instances
        :param custom_dir: a custom directory to save the subtitles. Defaults to
        same directory as the media file
        :param overwrite: overwrite files with the same name (default: True)
        :param timeout: subprocess timeout in seconds (default: 600)
        :raises: ExtractionError, OSError
        """
        extract_command = [FFMPEG_PATH, "-v", FF_LOG_LEVEL]

        if FFMPEG_STATS:
            extract_command.append("-stats")

        extract_command.extend(["-y", "-i", self.path])

        if custom_dir is not None:
            # May raise OSError
            os.makedirs(custom_dir, exist_ok=True)

        items = {}
        collected_paths = set()

        for subtitle in subtitles:
            sub_path = f"{os.path.splitext(self.path)[0]}.{subtitle.suffix}"
            if custom_dir is not None:
                sub_path = os.path.join(custom_dir, os.path.basename(sub_path))

            if sub_path in collected_paths:
                # Disambiguate streams that would produce the same filename.
                # BUGFIX: str.rstrip() strips a *character set*, not a suffix,
                # and could eat legitimate trailing characters of the filename;
                # slice the suffix off explicitly instead.
                suffix_token = f".{subtitle.suffix}"
                if sub_path.endswith(suffix_token):
                    base = sub_path[: -len(suffix_token)]
                else:
                    base = sub_path
                sub_path = f"{base}-{len(collected_paths)}.{subtitle.suffix}"

            if not overwrite and os.path.isfile(sub_path):
                # BUGFIX: the old message said "OVERWRITE TRUE"; this branch
                # only runs when overwrite is disabled and the file exists.
                logger.debug("Ignoring existing file (overwrite disabled): %s", sub_path)
                continue

            extract_command.extend(
                ["-map", f"0:{subtitle.index}", "-c", "copy", sub_path]
            )
            logger.debug("Appending subtitle path: %s", sub_path)
            collected_paths.add(sub_path)

            items[subtitle.index] = sub_path

        if not items:
            logger.debug("No subtitles to extract")
            return {}

        logger.debug("Extracting subtitle with command %s", " ".join(extract_command))

        try:
            subprocess.run(extract_command, timeout=timeout, check=True)
        except (subprocess.SubprocessError, FileNotFoundError) as error:
            raise ExtractionError(f"Error calling ffmpeg: {error}") from error

        for path in items.values():
            if not os.path.isfile(path):
                logger.debug("%s was not extracted", path)

        return items

    def __repr__(self) -> str:
        return f"<FFprobeVideoContainer {self.extension}: {self.path}>"
def check_integrity(
    subtitle: FFprobeSubtitleStream, path: str, sec_offset_threshold=900
):
    """A relative check for the integrity of a file. This can be used to find a failed
    ffmpeg extraction where the final file might not be complete or might be corrupted.
    Currently, only ASS and Subrip are supported.

    :param subtitle: FFprobeSubtitle instance
    :param path: the path of the subtitle file (ass or srt)
    :param sec_offset_threshold: the maximum seconds offset to determine if the file is complete
    :raises: InvalidFile
    """
    if subtitle.extension not in (ASS, SRT):
        raise InvalidFile(f"Extension not supported: {subtitle.extension}")

    try:
        sub = pysubs2.load(path)
    except (pysubs2.Pysubs2Error, UnicodeError, OSError, FileNotFoundError) as error:
        raise InvalidFile(error) from error

    if not len(sub):
        # BUGFIX: an empty/truncated file parses into zero events; sub[-1]
        # below would raise a bare IndexError instead of the documented
        # InvalidFile.
        raise InvalidFile(f"Parsed file doesn't have any events: {path}")

    # Compare the last event's end timestamp against the stream's declared
    # duration (both in milliseconds).
    off = abs(int(sub[-1].end) - subtitle.duration_ts)
    if off > abs(sec_offset_threshold) * 1000:
        raise InvalidFile(
            f"The last subtitle timestamp ({sub[-1].end/1000} sec) is {off/1000} sec ahead"
            f" from the subtitle stream total duration ({subtitle.duration} sec)"
        )

    logger.debug("Integrity check passed (%d sec offset)", off / 1000)
def to_srt(
    source: str, output: Optional[str] = None, remove_source: bool = False
) -> str:
    """Convert a subtitle to SubRip. Currently, only ASS is supported. SubRip
    files will be silently ignored.

    :param source: path of the subtitle to convert
    :param output: destination path; defaults to the source path with a .srt extension
    :param remove_source: delete the source file after a successful conversion
    :raises: ConversionError, OSError"""
    if source.endswith(".srt"):
        return source

    split_path = os.path.splitext(source)
    # BUGFIX: `x not in (".ass")` was a substring test against the *string*
    # ".ass" (parentheses alone don't make a tuple), so extensions like ".as"
    # or ".s" wrongly passed the check. Use a real one-element tuple.
    if split_path[-1] not in (".ass",):
        raise ConversionError(
            f"No converter found for extension: {split_path[-1]}"
        ) from None

    output = output or f"{split_path[0]}.srt"

    try:
        parsed = pysubs2.load(source)
        parsed.save(output)
    except (pysubs2.Pysubs2Error, UnicodeError) as error:
        raise ConversionError(f"Exception converting {output}: {error}") from error

    logger.debug("Converted: %s", output)

    if remove_source and source != output:
        try:
            os.remove(source)
        except OSError as error:
            logger.debug("Can't remove source: %s (%s)", source, error)

    return output
# Map of ffprobe codec names to file extensions.
_subtitle_extensions = {"subrip": "srt", "ass": "ass"}

# Patterns searched against a lowercased stream title tag to classify its
# disposition; see FFprobeSubtitleDisposition.update_from_tags().
_content_types = {
    "hearing_impaired": re.compile(r"sdh|hearing impaired"),
    "forced": re.compile(r"forced"),
    "comment": re.compile(r"comment"),
    "visual_impaired": re.compile(r"signs|visual impair"),
    "karaoke": re.compile(r"karaoke|songs"),
}

# Exceptions that FFprobeVideoContainer.get_subtitles() translates into
# InvalidSource (subprocess failure, bad/missing JSON, missing "streams" key).
_ffprobe_exceptions = (
    subprocess.SubprocessError,
    json.JSONDecodeError,
    FileNotFoundError,
    KeyError,
)

# Language tags promoted to a language+country pair when the stream title
# contains one of the listed hints (Latin American Spanish, Brazilian
# Portuguese); used by FFprobeSubtitleStream._language().
_extra_languages = {
    "spa": {
        "matches": (
            "es-la",
            "spa-la",
            "spl",
            "mx",
            "latin",
            "mexic",
            "argent",
            "latam",
        ),
        "language_args": ("spa", "MX"),
    },
    "por": {
        "matches": ("pt-br", "pob", "pb", "brazilian", "brasil", "brazil"),
        "language_args": ("por", "BR"),
    },
}

View File

@ -50,6 +50,8 @@ class Subtitle(object):
#: Encoding to decode with when accessing :attr:`text`
self.encoding = None
self.release_info = None
# validate the encoding
if encoding:
try:

View File

@ -66,7 +66,7 @@ def remove_crap_from_fn(fn):
class SZProviderPool(ProviderPool):
def __init__(self, providers=None, provider_configs=None, blacklist=None, throttle_callback=None,
def __init__(self, providers=None, provider_configs=None, blacklist=None, ban_list=None, throttle_callback=None,
pre_download_hook=None, post_download_hook=None, language_hook=None):
#: Name of providers to use
self.providers = providers
@ -82,6 +82,9 @@ class SZProviderPool(ProviderPool):
self.blacklist = blacklist or []
#: Should be a dict of 2 lists of strings
self.ban_list = ban_list or {'must_contain': [], 'must_not_contain': []}
self.throttle_callback = throttle_callback
self.pre_download_hook = pre_download_hook
@ -184,6 +187,15 @@ class SZProviderPool(ProviderPool):
if (str(provider), str(s.id)) in self.blacklist:
logger.info("Skipping blacklisted subtitle: %s", s)
continue
if s.release_info is not None:
if any([x for x in self.ban_list["must_not_contain"]
if re.search(x, s.release_info, flags=re.IGNORECASE) is not None]):
logger.info("Skipping subtitle because release name contains prohibited string: %s", s)
continue
if any([x for x in self.ban_list["must_contain"]
if re.search(x, s.release_info, flags=re.IGNORECASE) is None]):
logger.info("Skipping subtitle because release name does not contains required string: %s", s)
continue
if s.id in seen:
continue
s.plex_media_fps = float(video.fps) if video.fps else None
@ -506,7 +518,7 @@ class SZAsyncProviderPool(SZProviderPool):
return provider, provider_subtitles
def list_subtitles(self, video, languages, blacklist=None):
def list_subtitles(self, video, languages, blacklist=None, ban_list=None):
if is_windows_special_path:
return super(SZAsyncProviderPool, self).list_subtitles(video, languages)

View File

@ -8,6 +8,7 @@ import time
from random import randint
from urllib.parse import quote_plus
import babelfish
from dogpile.cache.api import NO_VALUE
from requests import Session
from subliminal.cache import region
@ -23,6 +24,8 @@ from subzero.language import Language
logger = logging.getLogger(__name__)
show_cells_re = re.compile(b'<td class="(?:version|vr)">.*?</td>', re.DOTALL)
#: Series header parsing regex
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$')
@ -37,7 +40,8 @@ class Addic7edSubtitle(_Addic7edSubtitle):
download_link, uploader=None):
super(Addic7edSubtitle, self).__init__(language, hearing_impaired, page_link, series, season, episode,
title, year, version, download_link)
self.release_info = version.replace('+', ',') if version else None
# Guessit will fail if the input is None
self.release_info = version.replace('+', ',') if version else ""
self.uploader = uploader
def get_matches(self, video):
@ -68,6 +72,7 @@ class Addic7edProvider(_Addic7edProvider):
languages.update(set(Language.rebuild(l, hi=True) for l in languages))
video_types = (Episode, Movie)
vip = False
USE_ADDICTED_RANDOM_AGENTS = False
hearing_impaired_verifiable = True
subtitle_class = Addic7edSubtitle
@ -76,9 +81,10 @@ class Addic7edProvider(_Addic7edProvider):
sanitize_characters = {'-', ':', '(', ')', '.', '/'}
last_show_ids_fetch_key = "addic7ed_last_id_fetch"
def __init__(self, username=None, password=None, use_random_agents=False):
def __init__(self, username=None, password=None, use_random_agents=False, is_vip=False):
super(Addic7edProvider, self).__init__(username=username, password=password)
self.USE_ADDICTED_RANDOM_AGENTS = use_random_agents
self.vip = is_vip
if not all((username, password)):
raise ConfigurationError('Username and password must be specified')
@ -95,7 +101,7 @@ class Addic7edProvider(_Addic7edProvider):
# login
if self.username and self.password:
def check_verification(cache_region):
rr = self.session.get(self.server_url + 'panel.php', allow_redirects=False, timeout=60,
rr = self.session.get(self.server_url + 'panel.php', allow_redirects=False, timeout=10,
headers={"Referer": self.server_url})
if rr.status_code == 302:
logger.info('Addic7ed: Login expired')
@ -115,7 +121,7 @@ class Addic7edProvider(_Addic7edProvider):
tries = 0
while tries <= 3:
tries += 1
r = self.session.get(self.server_url + 'login.php', timeout=60, headers={"Referer": self.server_url})
r = self.session.get(self.server_url + 'login.php', timeout=10, headers={"Referer": self.server_url})
if "g-recaptcha" in r.text or "grecaptcha" in r.text:
logger.info('Addic7ed: Solving captcha. This might take a couple of minutes, but should only '
'happen once every so often')
@ -139,11 +145,13 @@ class Addic7edProvider(_Addic7edProvider):
if tries >= 3:
raise Exception("Addic7ed: Couldn't solve captcha!")
logger.info("Addic7ed: Couldn't solve captcha! Retrying")
time.sleep(4)
continue
data[g] = result
r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=60,
time.sleep(1)
r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10,
headers={"Referer": self.server_url + "login.php"})
if "relax, slow down" in r.text:
@ -157,6 +165,7 @@ class Addic7edProvider(_Addic7edProvider):
logger.error("Addic7ed: Something went wrong when logging in")
raise AuthenticationError(self.username)
logger.info("Addic7ed: Something went wrong when logging in; retrying")
time.sleep(4)
continue
break
@ -165,6 +174,8 @@ class Addic7edProvider(_Addic7edProvider):
logger.debug('Addic7ed: Logged in')
self.logged_in = True
time.sleep(2)
def terminate(self):
self.session.close()
@ -238,7 +249,7 @@ class Addic7edProvider(_Addic7edProvider):
# get the movie id
logger.info('Getting movie id')
r = self.session.get(self.server_url + 'search.php?search=' + quote_plus(movie), timeout=60)
r = self.session.get(self.server_url + 'search.php?search=' + quote_plus(movie), timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
@ -285,10 +296,18 @@ class Addic7edProvider(_Addic7edProvider):
logger.info('Getting show ids')
region.set(self.last_show_ids_fetch_key, datetime.datetime.now())
r = self.session.get(self.server_url + 'shows.php', timeout=60)
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
# LXML parser seems to fail when parsing Addic7ed.com HTML markup.
# Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)
# Assuming the site's markup is bad, and stripping it down to only contain what's needed.
show_cells = [cell.decode("utf-8", "ignore") for cell in re.findall(show_cells_re, r.content)]
if show_cells:
soup = ParserBeautifulSoup(''.join(show_cells), ['lxml', 'html.parser'])
else:
# If RegEx fails, fall back to original r.content and use 'html.parser'
soup = ParserBeautifulSoup(r.content, ['html.parser'])
# populate the show ids
show_ids = {}
@ -345,7 +364,7 @@ class Addic7edProvider(_Addic7edProvider):
headers = {
"referer": self.server_url + "srch.php"
}
r = self.session.get(self.server_url + endpoint, params=params, timeout=60, headers=headers)
r = self.session.get(self.server_url + endpoint, params=params, timeout=10, headers=headers)
r.raise_for_status()
if r.text and "Sorry, your search" not in r.text:
@ -386,7 +405,7 @@ class Addic7edProvider(_Addic7edProvider):
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'ajax_loadShow.php',
params={'show': show_id, 'season': season},
timeout=60,
timeout=10,
headers={
"referer": "%sshow/%s" % (self.server_url, show_id),
"X-Requested-With": "XMLHttpRequest"
@ -418,7 +437,12 @@ class Addic7edProvider(_Addic7edProvider):
continue
# read the item
language = Language.fromaddic7ed(cells[3].text)
try:
language = Language.fromaddic7ed(cells[3].text)
except babelfish.exceptions.LanguageReverseError as error:
logger.debug("Language error: %s, Ignoring subtitle", error)
continue
hearing_impaired = bool(cells[6].text)
page_link = self.server_url + cells[2].a['href'][1:]
season = int(cells[0].text)
@ -444,9 +468,9 @@ class Addic7edProvider(_Addic7edProvider):
def query_movie(self, movie_id, title, year=None):
# get the page of the movie
logger.info('Getting the page of movie id %d', movie_id)
logger.info('Getting the page of movie id %s', movie_id)
r = self.session.get(self.server_url + 'movie/' + movie_id,
timeout=60,
timeout=10,
headers={
"referer": self.server_url,
"X-Requested-With": "XMLHttpRequest"
@ -488,11 +512,24 @@ class Addic7edProvider(_Addic7edProvider):
continue
# read the item
language = Language.fromaddic7ed(row2.contents[4].text.strip('\n'))
try:
language = Language.fromaddic7ed(row2.contents[4].text.strip('\n'))
except babelfish.exceptions.LanguageReverseError as error:
logger.debug("Language error: %s, Ignoring subtitle", error)
continue
hearing_impaired = bool(row3.contents[1].contents[1].attrs['src'].endswith('hi.jpg'))
page_link = self.server_url + 'movie/' + movie_id
version_matches = re.search(r'Version\s(.+),.+', str(row1.contents[1].contents[1]))
version = version_matches.group(1) if version_matches else None
# Seems like Addic7ed returns the first word in the language of the user (Version, Versión, etc)
# As we can't match a regex, we will just strip the first word
try:
version = " ".join(str(row1.contents[1].contents[1]).split()[1:])
version_matches = re.search(r"(.+),.+", version)
version = version_matches.group(1) if version_matches else None
except IndexError:
version = None
try:
download_link = row2.contents[8].contents[3].attrs['href'][1:]
except IndexError:
@ -550,9 +587,30 @@ class Addic7edProvider(_Addic7edProvider):
return []
def download_subtitle(self, subtitle):
last_dls = region.get("addic7ed_dls")
now = datetime.datetime.now()
one_day = datetime.timedelta(hours=24)
def raise_limit():
logger.info("Addic7ed: Downloads per day exceeded (%s)", cap)
raise DownloadLimitPerDayExceeded
if type(last_dls) is not list:
last_dls = []
else:
# filter all non-expired DLs
last_dls = list(filter(lambda t: t + one_day > now, last_dls))
region.set("addic7ed_dls", last_dls)
cap = self.vip and 80 or 40
amount = len(last_dls)
if amount >= cap:
raise_limit()
# download the subtitle
r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
timeout=60)
timeout=10)
r.raise_for_status()
if r.status_code == 304:
@ -569,3 +627,9 @@ class Addic7edProvider(_Addic7edProvider):
raise DownloadLimitExceeded
subtitle.content = fix_line_ending(r.content)
last_dls.append(datetime.datetime.now())
region.set("addic7ed_dls", last_dls)
logger.info("Addic7ed: Used %s/%s downloads", amount + 1, cap)
if amount + 1 >= cap:
raise_limit()

View File

@ -36,6 +36,7 @@ class BetaSeriesSubtitle(Subtitle):
self.matches = matches
self.source = source
self.video_release_group = video_release_group
self.release_info = video_name
@property
def id(self):
@ -45,10 +46,6 @@ class BetaSeriesSubtitle(Subtitle):
def download_link(self):
return self.download_url
@property
def release_info(self):
return self.video_name
def get_matches(self, video):
matches = self.matches

View File

@ -36,15 +36,12 @@ class BSPlayerSubtitle(Subtitle):
self.subtype = subtype
self.video = video
self.subid = subid
self.release_info = filename
@property
def id(self):
return self.subid
@property
def release_info(self):
return self.filename
def get_matches(self, video):
matches = set()
matches |= guess_matches(video, guessit(self.filename))

View File

@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
import logging
import os
import shutil
import tempfile
from babelfish import language_converters
import fese
from fese import check_integrity
from fese import FFprobeSubtitleStream
from fese import FFprobeVideoContainer
from fese import to_srt
from subliminal.subtitle import fix_line_ending
from subliminal_patch.core import Episode
from subliminal_patch.core import Movie
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
logger = logging.getLogger(__name__)
# Replace Babelfish's Language with Subzero's Language
fese.Language = Language
class EmbeddedSubtitle(Subtitle):
    """Subtitle wrapping an embedded stream inside a local media container."""

    provider_name = "embeddedsubtitles"
    # There is no remote hash to verify; the subtitle lives in the media file.
    hash_verifiable = False

    def __init__(self, stream, container, matches):
        """
        :param stream: FFprobeSubtitleStream of this embedded subtitle
        :param container: FFprobeVideoContainer holding the stream
        :param matches: pre-computed set of matches returned by get_matches()
        """
        super().__init__(stream.language, stream.disposition.hearing_impaired)
        if stream.disposition.forced:
            self.language = Language.rebuild(stream.language, forced=True)
        self.stream: FFprobeSubtitleStream = stream
        self.container: FFprobeVideoContainer = container
        self.forced = stream.disposition.forced
        self._matches: set = matches
        # The media file itself acts as the "page" for this subtitle.
        self.page_link = self.container.path
        self.release_info = os.path.basename(self.page_link)

    def get_matches(self, video):
        # An embedded subtitle always matches its own container, hence "hash".
        if self.hearing_impaired:
            self._matches.add("hearing_impaired")

        self._matches.add("hash")
        return self._matches

    @property
    def id(self):
        # Unique per container path and stream index.
        return f"{self.container.path}_{self.stream.index}"
class EmbeddedSubtitlesProvider(Provider):
    """Provider that serves subtitle streams embedded in the video container.

    Streams are probed with ffprobe and, on download, extracted with ffmpeg
    into a per-provider cache directory (cleared on terminate).
    """

    provider_name = "embeddedsubtitles"

    # Every alpha2 language plus the regional variants handled explicitly,
    # each also offered as hearing-impaired and forced.
    languages = {Language("por", "BR"), Language("spa", "MX")} | {
        Language.fromalpha2(l) for l in language_converters["alpha2"].codes
    }
    languages.update(set(Language.rebuild(lang, hi=True) for lang in languages))
    languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))

    video_types = (Episode, Movie)
    subtitle_class = EmbeddedSubtitle

    def __init__(
        self,
        include_ass=True,
        include_srt=True,
        cache_dir=None,
        ffprobe_path=None,
        ffmpeg_path=None,
    ):
        """
        :param include_ass: offer embedded ASS streams
        :param include_srt: offer embedded SubRip streams
        :param cache_dir: base directory for extracted subtitle files
            (defaults to the system temp dir)
        :param ffprobe_path: optional override for the ffprobe binary
        :param ffmpeg_path: optional override for the ffmpeg binary
        """
        self._include_ass = include_ass
        self._include_srt = include_srt
        self._cache_dir = os.path.join(
            cache_dir or tempfile.gettempdir(), self.__class__.__name__.lower()
        )
        # Maps container path -> {stream index: extracted subtitle path}
        self._cached_paths = {}

        fese.FFPROBE_PATH = ffprobe_path or fese.FFPROBE_PATH
        fese.FFMPEG_PATH = ffmpeg_path or fese.FFMPEG_PATH

        if logger.getEffectiveLevel() == logging.DEBUG:
            fese.FF_LOG_LEVEL = "warning"
        else:
            # Default is True
            fese.FFMPEG_STATS = False

    def initialize(self):
        os.makedirs(self._cache_dir, exist_ok=True)

    def terminate(self):
        # Remove leftovers
        shutil.rmtree(self._cache_dir, ignore_errors=True)

    def query(self, path: str, languages):
        """Return EmbeddedSubtitle candidates for *path* matching *languages*."""
        video = FFprobeVideoContainer(path)

        try:
            # Materialize the filter: a lazy filter object is always truthy,
            # so the emptiness check below would otherwise never fire.
            streams = list(filter(_check_allowed_extensions, video.get_subtitles()))
        except fese.InvalidSource as error:
            logger.error("Error trying to get subtitles for %s: %s", video, error)
            streams = []

        if not streams:
            logger.debug("No subtitles found for container: %s", video)

        # only_forced: every requested language is forced -> skip non-forced
        # also_forced: at least one requested language is forced
        only_forced = all(lang.forced for lang in languages)
        also_forced = any(lang.forced for lang in languages)

        subtitles = []

        for stream in streams:
            if not self._include_ass and stream.extension == "ass":
                logger.debug("Ignoring ASS: %s", stream)
                continue

            if not self._include_srt and stream.extension == "srt":
                logger.debug("Ignoring SRT: %s", stream)
                continue

            if stream.language not in languages:
                continue

            disposition = stream.disposition

            if only_forced and not disposition.forced:
                continue

            if (
                disposition.generic
                or disposition.hearing_impaired
                or (disposition.forced and also_forced)
            ):
                logger.debug("Appending subtitle: %s", stream)
                subtitles.append(EmbeddedSubtitle(stream, video, {"hash"}))
            else:
                logger.debug("Ignoring unwanted subtitle: %s", stream)

        return subtitles

    def list_subtitles(self, video, languages):
        if not os.path.isfile(video.original_path):
            logger.debug("Ignoring inexistent file: %s", video.original_path)
            return []

        return self.query(video.original_path, languages)

    def download_subtitle(self, subtitle):
        """Read the (possibly freshly extracted) subtitle file into memory."""
        path = self._get_subtitle_path(subtitle)
        with open(path, "rb") as sub:
            content = sub.read()
            subtitle.content = fix_line_ending(content)

    def _get_subtitle_path(self, subtitle: EmbeddedSubtitle):
        """Return the on-disk path for *subtitle*, extracting it if needed."""
        container = subtitle.container

        # Check if the container is not already in the instance
        if container.path not in self._cached_paths:
            # Extract all subtitle streams to avoid reading the entire
            # container over and over
            streams = filter(_check_allowed_extensions, container.get_subtitles())
            extracted = container.extract_subtitles(list(streams), self._cache_dir)
            # Add the extracted paths to the container path key
            self._cached_paths[container.path] = extracted

        cached_path = self._cached_paths[container.path]
        # Get the subtitle file by index
        subtitle_path = cached_path[subtitle.stream.index]

        # Raises fese.InvalidFile when the extracted file is broken or missing
        check_integrity(subtitle.stream, subtitle_path)

        # Convert to SRT if the subtitle is ASS
        new_subtitle_path = to_srt(subtitle_path, remove_source=True)
        if new_subtitle_path != subtitle_path:
            cached_path[subtitle.stream.index] = new_subtitle_path

        return new_subtitle_path
def _check_allowed_extensions(subtitle: FFprobeSubtitleStream):
return subtitle.extension in ("ass", "srt")

View File

@ -28,6 +28,7 @@ class GreekSubtitlesSubtitle(Subtitle):
self.download_link = download_link
self.hearing_impaired = None
self.encoding = 'windows-1253'
self.release_info = version
@property
def id(self):

View File

@ -50,6 +50,7 @@ class KtuvitSubtitle(Subtitle):
self.ktuvit_id = ktuvit_id
self.subtitle_id = subtitle_id
self.release = release
self.release_info = release
def __repr__(self):
return "<%s [%s] %r [%s:%s]>" % (
@ -64,10 +65,6 @@ class KtuvitSubtitle(Subtitle):
def id(self):
return str(self.subtitle_id)
@property
def release_info(self):
return self.release
def get_matches(self, video):
matches = set()
# episode

View File

@ -42,15 +42,12 @@ class LegendasdivxSubtitle(Subtitle):
self.uploader = data['uploader']
self.wrong_fps = False
self.skip_wrong_fps = skip_wrong_fps
self.release_info = self.description
@property
def id(self):
return self.page_link
@property
def release_info(self):
return self.description
def get_matches(self, video):
matches = set()

View File

@ -26,6 +26,7 @@ class Napisy24Subtitle(Subtitle):
self.hash = hash
self.imdb_id = imdb_id
self.napis_id = napis_id
self.release_info = '' # TODO Try to get the release info from parsing the page
@property
def id(self):

View File

@ -39,6 +39,7 @@ class NekurSubtitle(Subtitle):
self.fps = fps
self.notes = notes
self.matches = None
self.release_info = notes
@property
def id(self):

View File

@ -29,15 +29,12 @@ class RegieLiveSubtitle(Subtitle):
self.video = video
self.rating = rating
self.language = language
self.release_info = filename
@property
def id(self):
return self.page_link
@property
def release_info(self):
return self.filename
def get_matches(self, video):
type_ = "movie" if isinstance(video, Movie) else "episode"
matches = set()

View File

@ -35,6 +35,7 @@ class SubtitriIdSubtitle(Subtitle):
self.year = year
self.imdb_id = imdb_id
self.matches = None
self.release_info = '' # TODO Try to get the release info from parsing the page
@property
def id(self):

View File

@ -35,15 +35,12 @@ class WizdomSubtitle(Subtitle):
self.imdb_id = imdb_id
self.subtitle_id = subtitle_id
self.release = release
self.release_info = release
@property
def id(self):
return str(self.subtitle_id)
@property
def release_info(self):
return self.release
def get_matches(self, video):
matches = set()
# episode

View File

@ -39,6 +39,7 @@ class XSubsSubtitle(Subtitle):
self.download_link = download_link
self.hearing_impaired = None
self.encoding = 'windows-1253'
self.release_info = version
@property
def id(self):

View File

@ -28,3 +28,4 @@ class Video(Video_):
self.external_subtitle_languages = set()
self.streaming_service = streaming_service
self.edition = edition
self.original_path = name

View File

@ -12,6 +12,7 @@ deep-translator=1.5.4
dogpile.cache=0.6.5
engineio=4.3.0
enzyme=0.4.1
fese=0.1.0
ffsubsync=0.4.11
Flask=1.1.1
flask-restful=0.3.8

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,70 @@
#!/usr/bin/env python3
import os
import pytest
import datetime
import tempfile
import subliminal
from subliminal_patch.providers.addic7ed import Addic7edProvider
from subliminal_patch.providers.addic7ed import Addic7edSubtitle
from dogpile.cache.region import register_backend as register_cache_backend
from subzero.language import Language
_ENV_VARS = (
"ANTICAPTCHA_CLASS",
"ANTICAPTCHA_ACCOUNT_KEY",
"ADDIC7ED_USERNAME",
"ADDIC7ED_PASSWORD",
)
def _can_run():
for env_var in _ENV_VARS:
if not os.environ.get(env_var):
return True
return False
# Skip the whole module when any required credential/anticaptcha variable
# is absent from the environment.
pytestmark = pytest.mark.skipif(
    _can_run(), reason=f"Some environment variables not set: {_ENV_VARS}"
)
@pytest.fixture(scope="session")
def region():
    """Configure subliminal's dogpile cache with the subzero file backend."""
    register_cache_backend(
        "subzero.cache.file", "subzero.cache_backends.file", "SZFileBackend"
    )
    subliminal.region.configure(
        "subzero.cache.file",
        expiration_time=datetime.timedelta(days=30),
        arguments={"appname": "sz_cache", "app_cache_dir": tempfile.gettempdir()},
    )
    # Flush the backend so the cache file exists before the tests run
    subliminal.region.backend.sync()
def test_list_subtitles_episode(region, episodes):
    """Live query against Addic7ed for an episode (uses real credentials).

    The ``episodes`` fixture is provided elsewhere (presumably conftest) —
    not visible here. The expected count of 6 is tied to the live site.
    """
    item = episodes["breaking_bad_s01e01"]
    language = Language("eng")
    with Addic7edProvider(
        os.environ["ADDIC7ED_USERNAME"], os.environ["ADDIC7ED_PASSWORD"]
    ) as provider:
        subtitles = provider.list_subtitles(item, {language})
        assert len(subtitles) == 6
    # Persist the dogpile cache after the live request
    subliminal.region.backend.sync()
def test_list_subtitles_movie(region, movies):
    """Live query against Addic7ed for a movie (uses real credentials).

    The ``movies`` fixture is provided elsewhere (presumably conftest) —
    not visible here. The expected count of 2 is tied to the live site.
    """
    item = movies["dune"]
    language = Language("eng")
    with Addic7edProvider(
        os.environ["ADDIC7ED_USERNAME"], os.environ["ADDIC7ED_PASSWORD"]
    ) as provider:
        subtitles = provider.list_subtitles(item, {language})
        assert len(subtitles) == 2
    # Persist the dogpile cache after the live request
    subliminal.region.backend.sync()

View File

@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-
import os
import fese
import pytest
from subliminal_patch.core import Episode, Movie
from subliminal_patch.providers.embeddedsubtitles import EmbeddedSubtitlesProvider
from subzero.language import Language
# Directory containing the sample media files used by these tests
_DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")

# Make fese yield subzero Language objects, mirroring the provider module
fese.Language = Language
@pytest.fixture
def video_single_language():
    # Sample container expected to hold only ASS streams in English
    # (file_1.mkv must exist under tests/data — not visible here)
    return Episode(
        os.path.join(_DATA, "file_1.mkv"),
        "Serial Experiments Lain",
        1,
        1,
        source="Web",
    )
@pytest.fixture
def video_multiple_languages():
    # Sample container expected to hold SubRip streams in several languages
    # (file_2.mkv must exist under tests/data — not visible here)
    return Movie(
        os.path.join(_DATA, "file_2.mkv"),
        "I'm No Longer Here",
        year=2019,
        source="Web",
    )
@pytest.fixture
def config(tmpdir):
    # Default provider kwargs; cache_dir is isolated per-test via tmpdir
    return {
        "include_ass": True,
        "include_srt": True,
        "cache_dir": tmpdir,
        "ffprobe_path": None,
        "ffmpeg_path": None,
    }
@pytest.fixture
def video_inexistent(tmpdir):
    # Points at a path that is guaranteed not to exist on disk
    return Movie(
        os.path.join(tmpdir, "inexistent_video.mkv"),
        "Dummy",
        year=2021,
        source="Web",
    )
def test_init(config):
    """Provider accepts the full config and works as a context manager."""
    with EmbeddedSubtitlesProvider(**config) as provider:
        assert provider is not None
def test_inexistent_video(video_inexistent):
    """A missing media file yields an empty list, not an exception."""
    with EmbeddedSubtitlesProvider() as provider:
        subtitles = provider.list_subtitles(video_inexistent, {})
        assert len(subtitles) == 0
def test_list_subtitles_only_forced(video_single_language):
    """Requesting exclusively forced subtitles returns nothing for this file."""
    with EmbeddedSubtitlesProvider() as provider:
        forced_english = Language.rebuild(Language.fromalpha2("en"), forced=True)
        found = provider.list_subtitles(video_single_language, {forced_english})
        assert len(found) == 0
def test_list_subtitles_also_forced(video_single_language):
    """Mixing a plain language with its forced variant still yields plain subs."""
    with EmbeddedSubtitlesProvider() as provider:
        plain = Language.fromalpha2("en")
        requested = {plain, Language.rebuild(plain, forced=True)}
        found = provider.list_subtitles(video_single_language, requested)
        assert any(sub.language == plain for sub in found)
        assert any(not sub.language.forced for sub in found)
def test_list_subtitles_single_language(video_single_language):
    """Every returned subtitle is in the one requested language."""
    with EmbeddedSubtitlesProvider() as provider:
        english = Language.fromalpha2("en")
        found = provider.list_subtitles(video_single_language, {english})
        assert all(sub.language == english for sub in found)
def test_list_subtitles_multiple_languages(video_multiple_languages):
    """Each requested language is represented among the returned subtitles."""
    with EmbeddedSubtitlesProvider() as provider:
        languages = {Language.fromalpha2(code) for code in ("en", "it", "fr", "es")} | {
            Language("por", "BR")
        }
        subs = provider.list_subtitles(video_multiple_languages, languages)
        for expected in languages:
            assert any(sub.language == expected for sub in subs)
def test_list_subtitles_wo_ass(video_single_language):
    """With ASS disabled, an ASS-only container yields no subtitles."""
    with EmbeddedSubtitlesProvider(include_ass=False) as provider:
        english = Language.fromalpha2("en")
        assert not provider.list_subtitles(video_single_language, {english})
def test_list_subtitles_wo_srt(video_multiple_languages):
    """With SubRip disabled, an SRT-only container yields no subtitles."""
    with EmbeddedSubtitlesProvider(include_srt=False) as provider:
        english = Language.fromalpha2("en")
        assert not provider.list_subtitles(video_multiple_languages, {english})
def test_download_subtitle_multiple(video_multiple_languages):
    """Downloading populates content for every listed subtitle stream."""
    with EmbeddedSubtitlesProvider() as provider:
        languages = {Language.fromalpha2(code) for code in ("en", "it", "fr")} | {
            Language("por", "BR")
        }
        subs = provider.list_subtitles(video_multiple_languages, languages)
        for sub in subs:
            provider.download_subtitle(sub)
            assert sub.content is not None
def test_download_subtitle_single(video_single_language):
    """Downloading a single embedded subtitle extracts its content."""
    with EmbeddedSubtitlesProvider() as provider:
        subtitle = provider.list_subtitles(
            video_single_language, {Language.fromalpha2("en")}
        )[0]
        provider.download_subtitle(subtitle)
        assert subtitle.content is not None
def test_download_invalid_subtitle(video_single_language):
    """A stale cache entry pointing at a missing file raises InvalidFile."""
    with EmbeddedSubtitlesProvider() as provider:
        subtitle = provider.list_subtitles(
            video_single_language, {Language.fromalpha2("en")}
        )[0]
        # Poison the extraction cache so the integrity check must fail
        provider._cached_paths[subtitle.container.path] = {
            subtitle.stream.index: "dummy.srt"
        }
        with pytest.raises(fese.InvalidFile):
            provider.download_subtitle(subtitle)