This commit is contained in:
morpheus65535 2021-10-09 10:34:50 -04:00
parent aece4bd845
commit 2be567dd7d
17 changed files with 491 additions and 135 deletions

View File

@ -4,7 +4,7 @@ import sys
import os
import io
import ast
from datetime import timedelta, datetime
import datetime
from dateutil import rrule
import pretty
import time
@ -698,7 +698,13 @@ class Series(Resource):
seriesid = request.form.get('seriesid')
action = request.form.get('action')
value = request.form.get('value')
if action == "refresh":
tmdbid = request.form.get('tmdbid')
if tmdbid:
TableShows.update({TableShows.tmdbId: tmdbid}).where(TableShows.seriesId == seriesid).execute()
event_stream(type='series', payload=seriesid)
series_scan_subtitles(seriesid)
return '', 204
elif action == "refresh":
series_scan_subtitles(seriesid)
return '', 204
elif action == "search-missing":
@ -1058,7 +1064,13 @@ class Movies(Resource):
movieid = request.form.get('movieid')
action = request.form.get('action')
value = request.form.get('value')
if action == "refresh":
tmdbid = request.form.get('tmdbid')
if tmdbid:
TableMovies.update({TableMovies.tmdbId: tmdbid}).where(TableMovies.movieId == movieid).execute()
event_stream(type='movie', payload=movieid)
movies_scan_subtitles(movieid)
return '', 204
elif action == "refresh":
movies_scan_subtitles(movieid)
return '', 204
elif action == "search-missing":
@ -1515,7 +1527,7 @@ class EpisodesHistory(Resource):
upgradable_episodes_not_perfect = []
if settings.general.getboolean('upgrade_subs'):
days_to_upgrade_subs = settings.general.days_to_upgrade_subs
minimum_timestamp = ((datetime.now() - timedelta(days=int(days_to_upgrade_subs))) -
minimum_timestamp = ((datetime.datetime.now() - datetime.timedelta(days=int(days_to_upgrade_subs))) -
datetime(1970, 1, 1)).total_seconds()
if settings.general.getboolean('upgrade_manual'):
@ -1607,7 +1619,7 @@ class EpisodesHistory(Resource):
# Make timestamp pretty
if item['timestamp']:
item["raw_timestamp"] = int(item['timestamp'])
item["parsed_timestamp"] = datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item["parsed_timestamp"] = datetime.datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item['timestamp'] = pretty.date(item["raw_timestamp"])
# Check if subtitles is blacklisted
@ -1637,7 +1649,7 @@ class MoviesHistory(Resource):
upgradable_movies_not_perfect = []
if settings.general.getboolean('upgrade_subs'):
days_to_upgrade_subs = settings.general.days_to_upgrade_subs
minimum_timestamp = ((datetime.now() - timedelta(days=int(days_to_upgrade_subs))) -
minimum_timestamp = ((datetime.datetime.now() - datetime.timedelta(days=int(days_to_upgrade_subs))) -
datetime(1970, 1, 1)).total_seconds()
if settings.general.getboolean('upgrade_manual'):
@ -1723,7 +1735,7 @@ class MoviesHistory(Resource):
# Make timestamp pretty
if item['timestamp']:
item["raw_timestamp"] = int(item['timestamp'])
item["parsed_timestamp"] = datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item["parsed_timestamp"] = datetime.datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item['timestamp'] = pretty.date(item["raw_timestamp"])
# Check if subtitles is blacklisted
@ -1798,7 +1810,8 @@ class HistoryStats(Resource):
.dicts()
data_movies = list(data_movies)
for dt in rrule.rrule(rrule.DAILY, dtstart=datetime.now() - timedelta(seconds=delay), until=datetime.now()):
for dt in rrule.rrule(rrule.DAILY, dtstart=datetime.datetime.now() - datetime.timedelta(seconds=delay),
until=datetime.datetime.now()):
if not any(d['date'] == dt.strftime('%Y-%m-%d') for d in data_series):
data_series.append({'date': dt.strftime('%Y-%m-%d'), 'count': 0})
if not any(d['date'] == dt.strftime('%Y-%m-%d') for d in data_movies):
@ -1949,8 +1962,8 @@ class EpisodesBlacklist(Resource):
for item in data:
# Make timestamp pretty
item["parsed_timestamp"] = datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item.update({'timestamp': pretty.date(datetime.fromtimestamp(item['timestamp']))})
item["parsed_timestamp"] = datetime.datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item.update({'timestamp': pretty.date(datetime.datetime.fromtimestamp(item['timestamp']))})
postprocessEpisode(item)
@ -2026,8 +2039,8 @@ class MoviesBlacklist(Resource):
postprocessMovie(item)
# Make timestamp pretty
item["parsed_timestamp"] = datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item.update({'timestamp': pretty.date(datetime.fromtimestamp(item['timestamp']))})
item["parsed_timestamp"] = datetime.datetime.fromtimestamp(int(item['timestamp'])).strftime('%x %X')
item.update({'timestamp': pretty.date(datetime.datetime.fromtimestamp(item['timestamp']))})
return jsonify(data=data)

View File

@ -93,7 +93,7 @@ def parse_video_metadata(file, file_size, media_type, use_cache=True):
except Exception:
pass
else:
# Check if file size and file id matches and if so, we return the cached value
# Check if file size match and if so, we return the cached value
if cached_value['file_size'] == file_size:
return cached_value

View File

@ -5,10 +5,12 @@ import re
import logging
from indexer.tmdb_caching_proxy import tmdb
from database import TableMoviesRootfolder, TableMovies
from event_handler import show_progress, hide_progress
from indexer.video_prop_reader import video_prop_reader
from indexer.tmdb_caching_proxy import tmdb_func_cache
from indexer.utils import normalize_title, VIDEO_EXTENSION
from list_subtitles import store_subtitles_movie
from config import settings
def list_movies_directories(root_dir_id):
@ -30,8 +32,7 @@ def list_movies_directories(root_dir_id):
# get root folder subdirectories (first level). They should be movies parent directories.
for i, directory_temp in enumerate(os.listdir(root_dir_path['path'])):
# remove year from directory name if found
directory_original = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
directory = re.sub(r"\s\b(19|20)\d{2}\b", '', directory_original).rstrip()
directory = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
# deal with trailing article
if directory.endswith(', The'):
directory = 'The ' + directory.rstrip(', The')
@ -53,19 +54,28 @@ def list_movies_directories(root_dir_id):
def get_movies_match(directory):
# get matching movies from tmdb using the directory name
directory_temp = directory
# get year from directory name if available
year_match = re.search(r"\((\b(19|20)\d{2}\b)\)", directory_temp)
if year_match:
year = year_match.group(1)
else:
year = None
# remove year from directory name if found
directory_original = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
directory = re.sub(r"\s\b(19|20)\d{2}\b", '', directory_original).rstrip()
directory = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
if directory.endswith(', The'):
directory = 'The ' + directory.rstrip(', The')
elif directory.endswith(', A'):
directory = 'A ' + directory.rstrip(', A')
try:
# get matches from tmdb (potentially from cache)
movies_temp = tmdb_func_cache(tmdb.Search().movie, query=directory)
movies_temp = tmdb_func_cache(tmdb.Search().movie, query=directory, year=year)
except Exception as e:
logging.exception('BAZARR is facing issues indexing movies: {0}'.format(repr(e)))
else:
matching_movies = []
# if there's results, parse them to return matching titles
if movies_temp['total_results']:
if len(movies_temp['results']):
for item in movies_temp['results']:
year = None
if 'release_date' in item:
@ -77,6 +87,8 @@ def get_movies_match(directory):
'tmdbId': item['id']
}
)
else:
logging.debug(f'BAZARR cannot match {directory} with TMDB.')
return matching_movies
@ -86,6 +98,7 @@ def get_movie_file_from_list(path):
max_file = None
for folder, subfolders, files in os.walk(path):
files.sort()
for file in files:
if os.path.splitext(file)[1] in VIDEO_EXTENSION:
if os.path.exists(os.path.join(folder, file)):
@ -108,9 +121,8 @@ def get_movies_metadata(tmdbid, root_dir_id, dir_name=None, movie_path=None):
if tmdbid:
try:
# get movie info, alternative titles and external ids from tmdb using cache if available
movies_info = tmdb_func_cache(tmdb.Movies(tmdbid).info)
alternative_titles = tmdb_func_cache(tmdb.Movies(tmdbid).alternative_titles)
external_ids = tmdb_func_cache(tmdb.Movies(tmdbid).external_ids)
movies_info = tmdb_func_cache(tmdb.Movies(tmdbid).info,
append_to_response='alternative_titles,external_ids')
except Exception as e:
logging.exception('BAZARR is facing issues indexing movies: {0}'.format(repr(e)))
else:
@ -123,8 +135,8 @@ def get_movies_metadata(tmdbid, root_dir_id, dir_name=None, movie_path=None):
'overview': movies_info['overview'],
'poster': images_url.format(movies_info['poster_path']) if movies_info['poster_path'] else None,
'fanart': images_url.format(movies_info['backdrop_path']) if movies_info['backdrop_path'] else None,
'alternativeTitles': [x['title'] for x in alternative_titles['titles']],
'imdbId': external_ids['imdb_id']
'alternativeTitles': [x['title'] for x in movies_info['alternative_titles']['titles']],
'imdbId': movies_info['external_ids']['imdb_id']
}
if dir_name:
@ -137,10 +149,14 @@ def get_movies_metadata(tmdbid, root_dir_id, dir_name=None, movie_path=None):
movies_metadata['tmdbId'] = tmdbid
if movie_file:
movies_metadata.update(video_prop_reader(os.path.join(movie_dir, movie_file)))
movies_metadata.update(video_prop_reader(file=os.path.join(movie_dir, movie_file),
media_type='movie',
use_cache=False))
else:
# otherwise we use only what's required to update the db row
movies_metadata.update(video_prop_reader(movie_path))
movies_metadata.update(video_prop_reader(file=movie_path,
media_type='movie',
use_cache=settings.movies.getboolean('use_ffprobe_cache')))
return movies_metadata
@ -148,14 +164,26 @@ def get_movies_metadata(tmdbid, root_dir_id, dir_name=None, movie_path=None):
def update_indexed_movies():
# update all movies in db, insert new ones and remove old ones
root_dir_ids = TableMoviesRootfolder.select(TableMoviesRootfolder.rootId, TableMoviesRootfolder.path).dicts()
for root_dir_id in root_dir_ids:
root_dir_ids_len = len(root_dir_ids)
for i, root_dir_id in enumerate(root_dir_ids):
show_progress(
id="m1_indexing_root_dirs",
header="Indexing movies root folders...",
name=root_dir_id['path'],
value=i,
count=root_dir_ids_len
)
# for each root folder, get the existing movies rows
existing_movies = TableMovies.select(TableMovies.path,
TableMovies.movieId,
TableMovies.tmdbId)\
.where(TableMovies.rootdir == root_dir_id['rootId'])\
.order_by(TableMovies.title)\
.dicts()
existing_movies_len = len(existing_movies)
existing_movies_iteration_number = 0
for existing_movie in existing_movies:
# delete removed movie from database
if not os.path.exists(existing_movie['path']):
@ -166,17 +194,32 @@ def update_indexed_movies():
root_dir_id=root_dir_id['rootId'],
movie_path=existing_movie['path'])
if movie_metadata:
show_progress(
id="m2_updating_existing_subdirectories_movies",
header="Updating existing movies...",
name=movie_metadata['title'],
value=existing_movies_iteration_number,
count=existing_movies_len
)
TableMovies.update(movie_metadata).where(TableMovies.movieId ==
existing_movie['movieId']).execute()
store_subtitles_movie(existing_movie['path'], use_cache=True)
store_subtitles_movie(existing_movie['path'],
use_cache=settings.movies.getboolean('use_ffprobe_cache'))
existing_movies_iteration_number += 1
hide_progress(id="m2_updating_existing_subdirectories_movies")
# add missing movies to database
root_dir_subdirectories = list_movies_directories(root_dir_id['rootId'])
# get existing movies paths
existing_movies_paths = [os.path.dirname(x['path']) for x in existing_movies]
root_dir_subdirectories_len = len(root_dir_subdirectories) - \
len(TableMovies.select().where(TableMovies.rootdir == root_dir_id['rootId']))
root_dir_subdirectories_iteration_number = 0
for root_dir_subdirectory in root_dir_subdirectories:
if os.path.join(root_dir_id['path'], root_dir_subdirectory['directory']) in existing_movies_paths:
# movie is already in db so we'll skip it
root_dir_subdirectories_iteration_number += 1
continue
else:
# new movie, let's get matches for it
@ -186,6 +229,14 @@ def update_indexed_movies():
directory_metadata = get_movies_metadata(root_dir_match[0]['tmdbId'], root_dir_id['rootId'],
root_dir_subdirectory['directory'])
if directory_metadata and directory_metadata['path']:
show_progress(
id="m2_adding_new_subdirectories_movies",
header="Adding new movies...",
name=directory_metadata['title'],
value=root_dir_subdirectories_iteration_number,
count=root_dir_subdirectories_len
)
try:
# let's insert this movie into the db
result = TableMovies.insert(directory_metadata).execute()
@ -196,6 +247,9 @@ def update_indexed_movies():
if result:
# once added to the db, we'll index existing subtitles and calculate the missing ones
store_subtitles_movie(directory_metadata['path'], use_cache=False)
root_dir_subdirectories_iteration_number += 1
hide_progress(id="m2_adding_new_subdirectories_movies")
hide_progress(id="m1_indexing_root_dirs")
def update_specific_movie(movieId, use_cache=True):
@ -213,5 +267,5 @@ def update_specific_movie(movieId, use_cache=True):
f'"{movie_metadata["path"]}". The exception encountered is "{e}".')
else:
if result:
# index existign subtitles and calculate missing ones
# index existing subtitles and calculate missing ones
store_subtitles_movie(movie_metadata['path'], use_cache=use_cache)

View File

@ -6,16 +6,19 @@ from indexer.tmdb_caching_proxy import tmdb
from guessit import guessit
from requests.exceptions import HTTPError
from database import TableShows, TableEpisodes
from event_handler import show_progress, hide_progress
from indexer.video_prop_reader import video_prop_reader
from indexer.tmdb_caching_proxy import tmdb_func_cache
from indexer.utils import VIDEO_EXTENSION
from list_subtitles import store_subtitles
from config import settings
def get_series_episodes(series_directory):
# return, for a specific series path, all the video files that can be recursively found
episodes_path = []
for root, dirs, files in os.walk(series_directory):
files.sort()
for filename in files:
if os.path.splitext(filename)[1] in VIDEO_EXTENSION and filename[0] != '.':
if os.path.exists(os.path.join(root, filename)):
@ -31,15 +34,24 @@ def get_episode_metadata(file, tmdbid, series_id, update=False):
# guess season and episode number from filename
guessed = guessit(file)
if 'season' in guessed and 'episode' in guessed:
if isinstance(guessed['season'], int):
# single season file
season_number = guessed['season']
else:
# for multiple season file, we use the first one. This one is really strange but I've run into it during
# development...
season_number = guessed['season'][0]
if isinstance(guessed['episode'], int):
# single episode file
episode_number = guessed['episode']
else:
# for multiple episode file, we use the first one. ex.: S01E01-02 will be added as episode 1
episode_number = guessed['episode'][0]
try:
# get episode metadata from tmdb
episode_info = tmdb_func_cache(tmdb.TV_Episodes(tv_id=tmdbid, season_number=guessed['season'],
episode_info = tmdb_func_cache(tmdb.TV_Episodes(tv_id=tmdbid, season_number=season_number,
episode_number=episode_number).info)
except HTTPError:
logging.debug(f"BAZARR can't find this episode on TMDB: {file}")
@ -48,10 +60,10 @@ def get_episode_metadata(file, tmdbid, series_id, update=False):
except Exception:
logging.exception(f'BAZARR is facing issues indexing this episodes: {file}')
return False
else:
finally:
episode_metadata = {
'title': episode_info['name'],
'season': guessed['season'],
'season': season_number,
'episode': episode_number
}
if not update:
@ -69,7 +81,8 @@ def get_episode_metadata(file, tmdbid, series_id, update=False):
else:
episode_metadata['monitored'] = series_monitored_state['monitored']
# we now get the video file metadata using ffprobe
episode_metadata.update(video_prop_reader(file))
episode_metadata.update(video_prop_reader(file=file, media_type='episode',
use_cache=settings.series.getboolean('use_ffprobe_cache')))
return episode_metadata
@ -87,12 +100,28 @@ def update_series_episodes(seriesId=None, use_cache=True):
existing_series_episodes = TableEpisodes.select(TableEpisodes.path,
TableEpisodes.seriesId,
TableEpisodes.episodeId,
TableShows.tmdbId)\
TableShows.tmdbId,
TableEpisodes.title.alias('episodeTitle'),
TableEpisodes.season,
TableEpisodes.episode,
TableShows.title.alias('seriesTitle'))\
.join(TableShows)\
.where(TableEpisodes.seriesId == series_id)\
.order_by(TableEpisodes.season, TableEpisodes.episode)\
.dicts()
existing_series_episodes_len = len(existing_series_episodes)
existing_series_episodes_iteration_number = 0
for existing_series_episode in existing_series_episodes:
show_progress(
id="s3_series_episodes_update",
header=f"Updating {existing_series_episode['seriesTitle']} episodes...",
name=f"S{existing_series_episode['season']:02d}E{existing_series_episode['episode']:02d} - "
f"{existing_series_episode['episodeTitle']}",
value=existing_series_episodes_iteration_number,
count=existing_series_episodes_len
)
# delete removed episodes from database
if not os.path.exists(existing_series_episode['path']):
TableEpisodes.delete().where(TableEpisodes.path == existing_series_episode['path']).execute()
@ -110,10 +139,15 @@ def update_series_episodes(seriesId=None, use_cache=True):
# indexing existing subtitles and missing ones.
store_subtitles(existing_series_episode['path'], use_cache=use_cache)
existing_series_episodes_iteration_number += 1
hide_progress(id="s3_series_episodes_update")
# add missing episodes to database
try:
# get series row from db
series_metadata = TableShows.select(TableShows.path,
TableShows.title,
TableShows.tmdbId) \
.where(TableShows.seriesId == series_id) \
.dicts() \
@ -124,15 +158,28 @@ def update_series_episodes(seriesId=None, use_cache=True):
# get all the episodes for that series
episodes = get_series_episodes(series_metadata['path'])
# make it a list of paths
existing_episodes = [x['path'] for x in existing_series_episodes]
existing_episodes = [x['path'] for x in
TableEpisodes.select().where(TableEpisodes.seriesId == series_id).dicts()]
existing_episodes_len = len(episodes) - len(existing_episodes)
existing_episodes_iteration_number = 0
for episode in episodes:
if episode in existing_episodes:
# skip episode if it's already in DB (been updated earlier)
existing_episodes_iteration_number += 1
continue
else:
# get episode metadata form tmdb
episode_metadata = get_episode_metadata(episode, series_metadata['tmdbId'], series_id, update=False)
if episode_metadata:
show_progress(
id="s3_series_episodes_add",
header=f"Adding {series_metadata['title']} episodes...",
name=f"S{episode_metadata['season']:02d}E{episode_metadata['episode']:02d} - "
f"{episode_metadata['title']}",
value=existing_episodes_iteration_number,
count=existing_episodes_len
)
try:
# insert episode to db
result = TableEpisodes.insert(episode_metadata).execute()
@ -142,4 +189,6 @@ def update_series_episodes(seriesId=None, use_cache=True):
else:
if result:
# index existing subtitles and missing ones
store_subtitles(episode, use_cache=use_cache)
store_subtitles(episode, use_cache=False)
existing_episodes_iteration_number += 1
hide_progress(id="s3_series_episodes_add")

View File

@ -5,9 +5,11 @@ import re
import logging
from indexer.tmdb_caching_proxy import tmdb
from database import TableShowsRootfolder, TableShows
from event_handler import show_progress, hide_progress
from indexer.tmdb_caching_proxy import tmdb_func_cache
from indexer.utils import normalize_title
from .episodes_indexer import update_series_episodes
from config import settings
def list_series_directories(root_dir):
@ -28,8 +30,7 @@ def list_series_directories(root_dir):
return series_directories
for i, directory_temp in enumerate(os.listdir(root_dir_path['path'])):
# iterate over each directories under the root folder path and strip year if present
directory_original = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
directory = re.sub(r"\s\b(19|20)\d{2}\b", '', directory_original).rstrip()
directory = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
# deal with trailing article
if directory.endswith(', The'):
directory = 'The ' + directory.rstrip(', The')
@ -51,19 +52,28 @@ def list_series_directories(root_dir):
def get_series_match(directory):
# get matching series from tmdb using the directory name
directory_temp = directory
# get year from directory name if available
year_match = re.search(r"\((\b(19|20)\d{2}\b)\)", directory_temp)
if year_match:
year = year_match.group(1)
else:
year = None
# remove year from directory name if found
directory_original = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
directory = re.sub(r"\s\b(19|20)\d{2}\b", '', directory_original).rstrip()
directory = re.sub(r"\(\b(19|20)\d{2}\b\)", '', directory_temp).rstrip()
if directory.endswith(', The'):
directory = 'The ' + directory.rstrip(', The')
elif directory.endswith(', A'):
directory = 'A ' + directory.rstrip(', A')
try:
# get matches from tmdb (potentially from cache)
series_temp = tmdb_func_cache(tmdb.Search().tv, query=directory)
series_temp = tmdb_func_cache(tmdb.Search().tv, query=directory, year=year)
except Exception as e:
logging.exception('BAZARR is facing issues indexing series: {0}'.format(repr(e)))
else:
matching_series = []
# if there's results, parse them to return matching titles
if series_temp['total_results']:
if len(series_temp['results']):
for item in series_temp['results']:
year = None
if 'first_air_date' in item:
@ -75,6 +85,8 @@ def get_series_match(directory):
'tmdbId': item['id']
}
)
else:
logging.debug(f'BAZARR cannot match {directory} with TMDB.')
return matching_series
@ -89,9 +101,7 @@ def get_series_metadata(tmdbid, root_dir_id, dir_name=None):
if tmdbid:
try:
# get series info, alternative titles and external ids from tmdb using cache if available
series_info = tmdb_func_cache(tmdb.TV(tmdbid).info)
alternative_titles = tmdb_func_cache(tmdb.TV(tmdbid).alternative_titles)
external_ids = tmdb_func_cache(tmdb.TV(tmdbid).external_ids)
series_info = tmdb_func_cache(tmdb.TV(tmdbid).info, append_to_response='alternative_titles,external_ids')
except Exception as e:
logging.exception('BAZARR is facing issues indexing series: {0}'.format(repr(e)))
else:
@ -104,8 +114,8 @@ def get_series_metadata(tmdbid, root_dir_id, dir_name=None):
'overview': series_info['overview'],
'poster': images_url.format(series_info['poster_path']) if series_info['poster_path'] else None,
'fanart': images_url.format(series_info['backdrop_path'])if series_info['backdrop_path'] else None,
'alternateTitles': [x['title'] for x in alternative_titles['results']],
'imdbId': external_ids['imdb_id']
'alternateTitles': [x['title'] for x in series_info['alternative_titles']['results']],
'imdbId': series_info['external_ids']['imdb_id']
}
# only for initial import and not update
@ -120,14 +130,25 @@ def get_series_metadata(tmdbid, root_dir_id, dir_name=None):
def update_indexed_series():
# update all series in db, insert new ones and remove old ones
root_dir_ids = TableShowsRootfolder.select(TableShowsRootfolder.rootId, TableShowsRootfolder.path).dicts()
for root_dir_id in root_dir_ids:
root_dir_ids_len = len(root_dir_ids)
for i, root_dir_id in enumerate(root_dir_ids):
show_progress(
id="s1_indexing_root_dirs",
header="Indexing series root folders...",
name=root_dir_id['path'],
value=i,
count=root_dir_ids_len
)
# for each root folder, get the existing series rows
root_dir_subdirectories = list_series_directories(root_dir_id['rootId'])
existing_subdirectories = [x['path'] for x in
TableShows.select(TableShows.path)
.where(TableShows.rootdir == root_dir_id['rootId'])
.order_by(TableShows.title)
.dicts()]
existing_subdirectories_len = len(existing_subdirectories)
existing_subdirectories_iteration_number = 0
for existing_subdirectory in existing_subdirectories:
# delete removed series from database
if not os.path.exists(existing_subdirectory):
@ -137,16 +158,31 @@ def update_indexed_series():
show_metadata = TableShows.select().where(TableShows.path == existing_subdirectory).dicts().get()
directory_metadata = get_series_metadata(show_metadata['tmdbId'], root_dir_id['rootId'])
if directory_metadata:
show_progress(
id="s2_updating_existing_subdirectories",
header="Updating existing series...",
name=directory_metadata['title'],
value=existing_subdirectories_iteration_number,
count=existing_subdirectories_len
)
result = TableShows.update(directory_metadata)\
.where(TableShows.tmdbId == show_metadata['tmdbId'])\
.execute()
if result:
update_series_episodes(seriesId=show_metadata['seriesId'], use_cache=True)
update_series_episodes(seriesId=show_metadata['seriesId'],
use_cache=settings.series.getboolean('use_ffprobe_cache'))
existing_subdirectories_iteration_number += 1
hide_progress(id="s2_updating_existing_subdirectories")
# add missing series to database
new_subdirectories_len = len(root_dir_subdirectories) - len(TableShows.select(TableShows.path)
.where(TableShows.rootdir == root_dir_id['rootId']))
new_directories_iteration_number = 0
for root_dir_subdirectory in root_dir_subdirectories:
if os.path.join(root_dir_id['path'], root_dir_subdirectory['directory']) in existing_subdirectories:
# series is already in db so we'll skip it
new_directories_iteration_number += 1
continue
else:
# new series, let's get matches for it
@ -155,6 +191,13 @@ def update_indexed_series():
# now that we have at least a match, we'll assume the first one is the good one and get metadata
directory_metadata = get_series_metadata(root_dir_match[0]['tmdbId'], root_dir_id['rootId'],
root_dir_subdirectory['directory'])
show_progress(
id="s2_adding_new_subdirectories",
header="Adding new series...",
name=directory_metadata['title'],
value=new_directories_iteration_number,
count=new_subdirectories_len
)
if directory_metadata:
try:
# let's insert this series into the db
@ -166,6 +209,9 @@ def update_indexed_series():
if series_id:
# once added to the db, we'll check for episodes for this series
update_series_episodes(seriesId=series_id, use_cache=False)
new_directories_iteration_number += 1
hide_progress(id="s2_adding_new_subdirectories")
hide_progress(id="s1_indexing_root_dirs")
def update_specific_series(seriesId):

View File

@ -8,17 +8,6 @@ CommonWordRegex = re.compile(r"\b(a|an|the|and|or|of)\b\s?")
DuplicateSpacesRegex = re.compile(r"\s{2,}")
def normalize_title(title):
title = title.lower()
title = re.sub(WordDelimiterRegex, " ", title)
title = re.sub(PunctuationRegex, "", title)
title = re.sub(CommonWordRegex, "", title)
title = re.sub(DuplicateSpacesRegex, " ", title)
return title.strip()
VIDEO_EXTENSION = [
# Unknown
".webm",
@ -72,3 +61,14 @@ VIDEO_EXTENSION = [
# Bluray
".m2ts",
]
def normalize_title(title):
title = title.lower()
title = re.sub(WordDelimiterRegex, " ", title)
title = re.sub(PunctuationRegex, "", title)
title = re.sub(CommonWordRegex, "", title)
title = re.sub(DuplicateSpacesRegex, " ", title)
return title.strip()

View File

@ -4,14 +4,16 @@ import logging
import os
from knowit import api
import enzyme
import pickle
from enzyme.exceptions import MalformedMKVError
from utils import get_binary
from get_languages import language_from_alpha3
from database import TableEpisodes, TableMovies
from indexer.utils import VIDEO_EXTENSION
def video_prop_reader(file):
def video_prop_reader(file, media_type, use_cache=False):
# function to get video properties from a media file
video_prop = {}
@ -23,25 +25,65 @@ def video_prop_reader(file):
# get the ffprobe path
ffprobe_path = get_binary("ffprobe")
# if we have ffprobe available
if ffprobe_path:
api.initialize({"provider": "ffmpeg", "ffmpeg": ffprobe_path})
data = api.know(file)
# if not, we use enzyme for mkv files
elif not ffprobe_path and os.path.splitext(file)[1] == "mkv":
if os.path.splitext(file)[1] == ".mkv":
with open(file, "rb") as f:
try:
mkv = enzyme.MKV(f)
except MalformedMKVError:
logging.error(
"BAZARR cannot analyze this MKV with our built-in MKV parser, you should install "
"ffmpeg/ffprobe: " + file
)
else:
data = mkv
else:
logging.debug(f"ffprobe not available and enzyme doesn't support this file extension: {file}")
# Define default data keys value
data = {
"ffprobe": {},
"enzyme": {},
"file_size": os.stat(file).st_size,
}
no_cache = False
if use_cache:
# Get the actual cache value from database
try:
if media_type == 'episode':
cache_key = TableEpisodes.select(TableEpisodes.ffprobe_cache)\
.where(TableEpisodes.path == file)\
.dicts()\
.get()
elif media_type == 'movie':
cache_key = TableMovies.select(TableMovies.ffprobe_cache)\
.where(TableMovies.path == file)\
.dicts()\
.get()
else:
cache_key = None
except (TableEpisodes.DoesNotExist, TableMovies.DoesNotExist):
no_cache = True
pass
else:
# check if we have a value for that cache key
try:
# Unpickle ffprobe cache
cached_value = pickle.loads(cache_key['ffprobe_cache'])
except Exception:
pass
else:
# Check if file size match and if so, we return the cached value
if cached_value['file_size'] == os.stat(file).st_size:
data = cached_value['ffprobe'] if ffprobe_path else cached_value['']
if no_cache:
# ok we wont use cache...
# if we have ffprobe available
if ffprobe_path:
api.initialize({"provider": "ffmpeg", "ffmpeg": ffprobe_path})
data = api.know(file)
# if not, we use enzyme for mkv files
elif not ffprobe_path and os.path.splitext(file)[1] == "mkv":
if os.path.splitext(file)[1] == ".mkv":
with open(file, "rb") as f:
try:
mkv = enzyme.MKV(f)
except MalformedMKVError:
logging.error(
"BAZARR cannot analyze this MKV with our built-in MKV parser, you should install "
"ffmpeg/ffprobe: " + file
)
else:
data = mkv
else:
logging.debug(f"ffprobe not available and enzyme doesn't support this file extension: {file}")
if data:
audio_language = []
@ -49,11 +91,10 @@ def video_prop_reader(file):
video_resolution = None
video_codec = None
audio_codec = None
file_size = None
file_size = os.path.getsize(file)
if ffprobe_path:
# if ffprobe has been used, we populate our dict using returned values
file_size = data['size']
if 'video' in data and len(data['video']):
video_resolution = data['video'][0]['resolution']
if 'codec' in data['video'][0]:
@ -73,7 +114,6 @@ def video_prop_reader(file):
pass
elif not ffprobe_path and os.path.splitext(file)[1] == "mkv":
# if we didn't find ffprobe but the file is an mkv, we parse enzyme metadata and populate our dict with it.
file_size = os.path.getsize(file)
if len(data.video_tracks):
for video_track in data.video_tracks:
video_resolution = str(video_track.height) + 'p'

View File

@ -499,9 +499,7 @@ def movies_full_scan_subtitles():
def series_scan_subtitles(no):
from indexer.series.local.series_indexer import update_specific_series
from indexer.series.local.episodes_indexer import update_series_episodes
update_specific_series(no)
update_series_episodes(no, use_cache=False)
def movies_scan_subtitles(no):

View File

@ -148,13 +148,42 @@ class Scheduler:
def __series_indexer(self):
if settings.general.getboolean('use_series'):
self.aps_scheduler.add_job(update_indexed_series, CronTrigger(year='2100'), max_instances=1,
id='update_indexed_series', name='Refresh Series')
full_update = settings.series.full_update
if full_update == "Daily":
self.aps_scheduler.add_job(
update_indexed_series, CronTrigger(hour=settings.series.full_update_hour), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_indexed_series', name='Refresh Series from disk',
replace_existing=True)
elif full_update == "Weekly":
self.aps_scheduler.add_job(
update_indexed_series,
CronTrigger(day_of_week=settings.series.full_update_day, hour=settings.series.full_update_hour),
max_instances=1, coalesce=True, misfire_grace_time=15, id='update_indexed_series',
name='Refresh Series from disk', replace_existing=True)
elif full_update == "Manually":
self.aps_scheduler.add_job(update_indexed_series, CronTrigger(year='2100'), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_indexed_series',
name='Refresh Series from disk', replace_existing=True)
def __movies_indexer(self):
    """Register the 'Refresh Movies from disk' job with APScheduler.

    Mirrors ``__series_indexer``: the cron trigger comes from
    ``settings.movies.full_update``; "Daily" and "Weekly" schedule real
    triggers, anything else (including "Manually") parks the job in year
    2100 so it only runs when triggered on demand.
    """
    if not settings.general.getboolean('use_movies'):
        return

    full_update = settings.movies.full_update
    if full_update == "Daily":
        trigger = CronTrigger(hour=settings.movies.full_update_hour)
    elif full_update == "Weekly":
        trigger = CronTrigger(day_of_week=settings.movies.full_update_day,
                              hour=settings.movies.full_update_hour)
    else:
        # "Manually" or any unrecognized value: far-future trigger that
        # will never fire by itself.
        trigger = CronTrigger(year='2100')

    # Single registration with replace_existing=True. The previous code
    # registered id 'update_indexed_movies' twice (a placeholder add_job
    # followed by the real one), which did redundant work and could raise
    # ConflictingIdError on re-registration since the placeholder lacked
    # replace_existing.
    self.aps_scheduler.add_job(
        update_indexed_movies, trigger, max_instances=1, coalesce=True,
        misfire_grace_time=15, id='update_indexed_movies',
        name='Refresh Movies from disk', replace_existing=True)
def __update_bazarr_task(self):
if not args.no_update and os.environ["BAZARR_VERSION"] != '':

View File

@ -112,6 +112,7 @@ declare namespace Item {
fanart: string;
overview: string;
imdbId: string;
tmdbId: string;
alternativeTitles: string[];
poster: string;
year: string;

View File

@ -4,6 +4,11 @@ declare namespace FormType {
profileid: (number | null)[];
}
// Payload for the "Fix Match" action: parallel arrays pairing each item id
// with the TMDB id the user wants it re-matched to.
interface FixMatchItem {
id: number[];
tmdbid: string[];
}
type SeriesAction =
| OneSerieAction
| SearchWantedAction

View File

@ -1,5 +1,6 @@
import {
faCloudUploadAlt,
faCodeBranch,
faHistory,
faSearch,
faSync,
@ -18,6 +19,7 @@ import { useMovieBy, useProfileBy } from "../../@redux/hooks";
import { MoviesApi, ProvidersApi } from "../../apis";
import {
ContentHeader,
FixMatchModal,
ItemEditorModal,
LoadingIndicator,
MovieHistoryModal,
@ -84,21 +86,6 @@ const MovieDetailView: FunctionComponent<Props> = ({ match }) => {
</Helmet>
<ContentHeader>
<ContentHeader.Group pos="start">
<ContentHeader.Button
icon={faSync}
disabled={hasTask}
onClick={() => {
const task = createTask(
item.title,
id,
MoviesApi.action.bind(MoviesApi),
{ action: "refresh", movieid: id }
);
dispatchTask("Refreshing movie...", [task], "Refreshing...");
}}
>
Refresh
</ContentHeader.Button>
<ContentHeader.Button
icon={faSearch}
disabled={item.profileId === null || hasTask}
@ -124,6 +111,13 @@ const MovieDetailView: FunctionComponent<Props> = ({ match }) => {
>
Manual
</ContentHeader.Button>
<ContentHeader.Button
disabled={!allowEdit || item.profileId === null || hasTask}
icon={faCloudUploadAlt}
onClick={() => showModal("upload", item)}
>
Upload
</ContentHeader.Button>
<ContentHeader.Button
icon={faHistory}
onClick={() => showModal("history", item)}
@ -141,11 +135,26 @@ const MovieDetailView: FunctionComponent<Props> = ({ match }) => {
<ContentHeader.Group pos="end">
<ContentHeader.Button
disabled={!allowEdit || item.profileId === null || hasTask}
icon={faCloudUploadAlt}
onClick={() => showModal("upload", item)}
icon={faSync}
disabled={hasTask}
onClick={() => {
const task = createTask(
item.title,
id,
MoviesApi.action.bind(MoviesApi),
{ action: "refresh", movieid: id }
);
dispatchTask("Refreshing movie...", [task], "Refreshing...");
}}
>
Upload
Refresh
</ContentHeader.Button>
<ContentHeader.Button
icon={faCodeBranch}
disabled={hasTask}
onClick={() => showModal("match", item)}
>
Fix Match
</ContentHeader.Button>
<ContentHeader.Button
icon={faWrench}
@ -178,6 +187,10 @@ const MovieDetailView: FunctionComponent<Props> = ({ match }) => {
></ItemEditorModal>
<SubtitleToolModal modalKey="tools" size="lg"></SubtitleToolModal>
<MovieHistoryModal modalKey="history" size="lg"></MovieHistoryModal>
<FixMatchModal
modalKey="match"
submit={(form) => MoviesApi.fixmatch(form)}
></FixMatchModal>
<MovieUploadModal modalKey="upload" size="lg"></MovieUploadModal>
<ManualSearchModal
modalKey="manual-search"

View File

@ -2,6 +2,7 @@ import {
faAdjust,
faBriefcase,
faCloudUploadAlt,
faCodeBranch,
faHdd,
faSearch,
faSync,
@ -18,6 +19,7 @@ import { useEpisodesBy, useProfileBy, useSerieBy } from "../../@redux/hooks";
import { SeriesApi } from "../../apis";
import {
ContentHeader,
FixMatchModal,
ItemEditorModal,
LoadingIndicator,
SeriesUploadModal,
@ -89,24 +91,6 @@ const SeriesEpisodesView: FunctionComponent<Props> = (props) => {
</Helmet>
<ContentHeader>
<ContentHeader.Group pos="start">
<ContentHeader.Button
icon={faSync}
disabled={hasTask}
onClick={() => {
const task = createTask(
serie.title,
id,
SeriesApi.action.bind(SeriesApi),
{
action: "refresh",
seriesid: id,
}
);
dispatchTask("Refreshing series...", [task], "Refreshing...");
}}
>
Refresh
</ContentHeader.Button>
<ContentHeader.Button
icon={faSearch}
onClick={() => {
@ -130,15 +114,6 @@ const SeriesEpisodesView: FunctionComponent<Props> = (props) => {
>
Search
</ContentHeader.Button>
</ContentHeader.Group>
<ContentHeader.Group pos="end">
<ContentHeader.Button
disabled={serie.episodeFileCount === 0 || !available || hasTask}
icon={faBriefcase}
onClick={() => showModal("tools", episodes.content)}
>
Tools
</ContentHeader.Button>
<ContentHeader.Button
disabled={
serie.episodeFileCount === 0 ||
@ -151,6 +126,40 @@ const SeriesEpisodesView: FunctionComponent<Props> = (props) => {
>
Upload
</ContentHeader.Button>
<ContentHeader.Button
disabled={serie.episodeFileCount === 0 || !available || hasTask}
icon={faBriefcase}
onClick={() => showModal("tools", episodes.content)}
>
Tools
</ContentHeader.Button>
</ContentHeader.Group>
<ContentHeader.Group pos="end">
<ContentHeader.Button
icon={faSync}
disabled={hasTask}
onClick={() => {
const task = createTask(
serie.title,
id,
SeriesApi.action.bind(SeriesApi),
{
action: "refresh",
seriesid: id,
}
);
dispatchTask("Refreshing series...", [task], "Refreshing...");
}}
>
Refresh
</ContentHeader.Button>
<ContentHeader.Button
icon={faCodeBranch}
disabled={hasTask}
onClick={() => showModal("match", serie)}
>
Fix Match
</ContentHeader.Button>
<ContentHeader.Button
icon={faWrench}
disabled={hasTask}
@ -185,6 +194,10 @@ const SeriesEpisodesView: FunctionComponent<Props> = (props) => {
modalKey="edit"
submit={(form) => SeriesApi.modify(form)}
></ItemEditorModal>
<FixMatchModal
modalKey="match"
submit={(form) => SeriesApi.fixmatch(form)}
></FixMatchModal>
<SeriesUploadModal
modalKey="upload"
episodes={episodes.content}

View File

@ -67,6 +67,10 @@ class MovieApi extends BaseApi {
return response;
}
// PATCH the movie endpoint with the ids needed to re-match the item on TMDB.
async fixmatch(form: FormType.FixMatchItem) {
  const { id, tmdbid } = form;
  await this.patch("", { movieid: id, tmdbid });
}
// Forward a movie action payload (refresh, search-missing, ...) to the
// PATCH endpoint unchanged.
async action(form: FormType.MoviesAction) {
  await this.patch("", form);
}

View File

@ -21,6 +21,10 @@ class SeriesApi extends BaseApi {
await this.post("", { seriesid: form.id, profileid: form.profileid });
}
// PATCH the series endpoint with the ids needed to re-match the item on TMDB.
async fixmatch(form: FormType.FixMatchItem) {
  const { id, tmdbid } = form;
  await this.patch("", { seriesid: id, tmdbid });
}
// Forward a series action payload (refresh, search-missing, ...) to the
// PATCH endpoint unchanged.
async action(form: FormType.SeriesAction) {
await this.patch("", form);
}

View File

@ -0,0 +1,86 @@
import React, { FunctionComponent, useState } from "react";
import { Container, Form } from "react-bootstrap";
import { AsyncButton } from "../";
import { useIsAnyTaskRunningWithId } from "../../@modules/task/hooks";
import { GetItemId } from "../../utilities";
import BaseModal, { BaseModalProps } from "./BaseModal";
import { useModalInformation } from "./hooks";
// Props for the Fix Match dialog.
interface Props {
// Sends the { id[], tmdbid[] } payload to the movies/series PATCH endpoint.
submit: (form: FormType.FixMatchItem) => Promise<void>;
// Invoked with the modal's item after a successful submit.
onSuccess?: (item: Item.Base) => void;
}
// Modal that lets the user override an item's TMDB match: shows the current
// TMDB id (when known) read-only and submits a user-entered replacement.
const FixMatch: FunctionComponent<Props & BaseModalProps> = (props) => {
const { onSuccess, submit, ...modal } = props;
// `payload` is the Item.Base the modal was opened with.
const { payload, closeModal } = useModalInformation<Item.Base>(
modal.modalKey
);
// TODO: Separate movies and series
// Disable submission while a background task is running for this item.
const hasTask = useIsAnyTaskRunningWithId([GetItemId(payload ?? {})]);
// Desired TMDB id typed by the user (free-form text, not validated here).
const [tmdbid, setValue] = useState("");
const [updating, setUpdating] = useState(false);
const footer = (
<AsyncButton
noReset
onChange={setUpdating}
disabled={hasTask}
promise={() => {
if (payload) {
// NOTE(review): GetItemId may presumably return undefined for an
// unexpected payload shape — confirm AsyncButton tolerates that.
const itemId = GetItemId(payload);
return submit({
id: [itemId],
tmdbid: [tmdbid],
});
} else {
// No payload: nothing to submit; AsyncButton treats null as a no-op.
return null;
}
}}
onSuccess={() => {
closeModal();
onSuccess && payload && onSuccess(payload);
}}
>
Fix Match and refresh
</AsyncButton>
);
return (
<BaseModal
closeable={!updating}
footer={footer}
title={payload?.title}
{...modal}
>
<Container fluid>
<Form>
{payload?.tmdbId ? (
<Form.Group>
<Form.Label>Actual TMDB ID</Form.Label>
<Form.Control
type="text"
disabled
defaultValue={payload?.tmdbId}
></Form.Control>
</Form.Group>
) : null}
<Form.Group>
<Form.Label>Desired TMDB ID</Form.Label>
<Form.Control
type="text"
required
onChange={(e) => setValue(e.target.value)}
></Form.Control>
</Form.Group>
</Form>
</Container>
</BaseModal>
);
};
export default FixMatch;

View File

@ -1,4 +1,5 @@
export * from "./BaseModal";
export { default as FixMatchModal } from "./FixMatchModal";
export * from "./HistoryModal";
export * from "./hooks";
export { default as ItemEditorModal } from "./ItemEditorModal";