Merge development into master

This commit is contained in:
github-actions[bot] 2021-10-12 23:45:07 +00:00 committed by GitHub
commit 88b69b5243
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 785 additions and 217 deletions

View File

@ -66,12 +66,14 @@ If you need something that is not already part of Bazarr, feel free to create a
* Subscenter
* Subsunacs.net
* SubSynchro
* Subtitrari-noi.ro
* subtitri.id.lv
* Subtitulamos.tv
* Sucha
* Supersubtitles
* Titlovi
* Titrari.ro
* Titulky.com
* TuSubtitulo
* TVSubtitles
* Wizdom

View File

@ -2008,18 +2008,25 @@ class SubtitleNameInfo(Resource):
for name in names:
opts = dict()
opts['type'] = 'episode'
result = guessit(name, options=opts)
guessit_result = guessit(name, options=opts)
result = {}
result['filename'] = name
if 'subtitle_language' in result:
result['subtitle_language'] = str(result['subtitle_language'])
if 'subtitle_language' in guessit_result:
result['subtitle_language'] = str(guessit_result['subtitle_language'])
if 'episode' in result:
result['episode'] = result['episode']
else:
result['episode'] = 0
result['episode'] = 0
if 'episode' in guessit_result:
if isinstance(guessit_result['episode'], list):
# for multiple episodes file, choose the first episode number
if len(guessit_result['episode']):
# make sure that guessit returned a list of more than 0 items
result['episode'] = int(guessit_result['episode'][0])
elif isinstance(guessit_result['episode'], (str, int)):
# if single episode (should be int but just in case we cast it to int)
result['episode'] = int(guessit_result['episode'])
if 'season' in result:
result['season'] = result['season']
if 'season' in guessit_result:
result['season'] = int(guessit_result['season'])
else:
result['season'] = 0

View File

@ -36,7 +36,7 @@ def check_releases():
if asset['name'] == 'bazarr.zip':
download_link = asset['browser_download_url']
if not download_link:
download_link = release['zipball_url']
continue
releases.append({'name': release['name'],
'body': release['body'],
'date': release['published_at'],

View File

@ -134,6 +134,9 @@ defaults = {
'username': '',
'password': ''
},
'podnapisi': {
'verify_ssl': 'True'
},
'legendasdivx': {
'username': '',
'password': '',
@ -180,6 +183,10 @@ defaults = {
'username': '',
'password': ''
},
'titulky': {
'username': '',
'password': ''
},
'subsync': {
'use_subsync': 'False',
'use_subsync_threshold': 'False',

View File

@ -57,8 +57,7 @@ PROVIDER_THROTTLE_MAP = {
},
"opensubtitlescom": {
TooManyRequests : (datetime.timedelta(minutes=1), "1 minute"),
DownloadLimitExceeded: (
datetime.timedelta(hours=hours_until_end_of_day), "{} hours".format(str(hours_until_end_of_day))),
DownloadLimitExceeded: (datetime.timedelta(hours=24), "24 hours"),
},
"addic7ed" : {
DownloadLimitExceeded: (datetime.timedelta(hours=3), "3 hours"),
@ -148,6 +147,7 @@ def get_providers_auth():
'podnapisi' : {
'only_foreign': False, # fixme
'also_foreign': False, # fixme
'verify_ssl': settings.podnapisi.getboolean('verify_ssl')
},
'subscene' : {
'username' : settings.subscene.username,

View File

@ -59,13 +59,11 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
lang = lang + ":hi"
logging.debug("BAZARR embedded subtitles detected: " + lang)
actual_subtitles.append([lang, None])
except:
logging.debug("BAZARR unable to index this unrecognized language: " + subtitle_language)
pass
except Exception as error:
logging.debug("BAZARR unable to index this unrecognized language: %s (%s)", subtitle_language, error)
except Exception as e:
logging.exception(
"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path))
pass
try:
dest_folder = get_subtitle_destination_folder()
core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
@ -307,9 +305,9 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
if item not in actual_subtitles_list:
missing_subtitles_list.append(item)
# remove missing that have forced or hi subtitles for this language in existing
# remove missing that have hi subtitles for this language in existing
for item in actual_subtitles_list:
if item[1] == 'True' or item[2] == 'True':
if item[2] == 'True':
try:
missing_subtitles_list.remove([item[0], 'False', 'False'])
except ValueError:

View File

@ -30,7 +30,7 @@
"react-select": "^4",
"react-table": "^7",
"recharts": "^2.0.8",
"rooks": "^5",
"rooks": "^5.7.1",
"sass": "^1",
"socket.io-client": "^4",
"typescript": "^4"
@ -3416,9 +3416,9 @@
}
},
"node_modules/@types/bootstrap": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/@types/bootstrap/-/bootstrap-5.1.0.tgz",
"integrity": "sha512-cR+eQJ/IrgcJZheb7xkKPiPNOa48zkc6fLZ4U9lDNNQp3qiiq3tW1xgrd+VzVJCram/Bnh+DdBsdsPdXKIYClA==",
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/@types/bootstrap/-/bootstrap-5.1.6.tgz",
"integrity": "sha512-3L6IvOCKyoVd3e4bgQTH7VBPbuYEOG8IQbRcuZ0AbjfwPdRX+kVf5L/7mVt1EVM+D/BVw4+71rtp7Z8yYROlpQ==",
"dev": true,
"dependencies": {
"@popperjs/core": "^2.9.2",
@ -18088,9 +18088,9 @@
"integrity": "sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w=="
},
"node_modules/rooks": {
"version": "5.4.6",
"resolved": "https://registry.npmjs.org/rooks/-/rooks-5.4.6.tgz",
"integrity": "sha512-BTVEPgCC2WFfLEYlvf1jtDPI7imbY6G2gbi75ksLSt5e2noJbAOfNx2hu1lp+A19d+KO+X/gwzvYnytruDA37Q==",
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/rooks/-/rooks-5.7.1.tgz",
"integrity": "sha512-Gztycgm+e+bS0vqLMSGlGe8f7rkXMxjfPj3FucM06/xu1CEFQx1pZ0zMVdWVxDeMXRePaQ2/g1K7ArIlGKyHbQ==",
"dependencies": {
"lodash.debounce": "^4.0.8",
"raf": "^3.4.1"
@ -20364,9 +20364,9 @@
}
},
"node_modules/typescript": {
"version": "4.3.5",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.3.5.tgz",
"integrity": "sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA==",
"version": "4.4.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.3.tgz",
"integrity": "sha512-4xfscpisVgqqDfPaJo5vkd+Qd/ItkoagnHpufr+i2QCHBsNYp+G7UAoyFl8aPtx879u38wPV65rZ8qbGZijalA==",
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@ -24706,9 +24706,9 @@
}
},
"@types/bootstrap": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/@types/bootstrap/-/bootstrap-5.1.0.tgz",
"integrity": "sha512-cR+eQJ/IrgcJZheb7xkKPiPNOa48zkc6fLZ4U9lDNNQp3qiiq3tW1xgrd+VzVJCram/Bnh+DdBsdsPdXKIYClA==",
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/@types/bootstrap/-/bootstrap-5.1.6.tgz",
"integrity": "sha512-3L6IvOCKyoVd3e4bgQTH7VBPbuYEOG8IQbRcuZ0AbjfwPdRX+kVf5L/7mVt1EVM+D/BVw4+71rtp7Z8yYROlpQ==",
"dev": true,
"requires": {
"@popperjs/core": "^2.9.2",
@ -36126,9 +36126,9 @@
}
},
"rooks": {
"version": "5.4.6",
"resolved": "https://registry.npmjs.org/rooks/-/rooks-5.4.6.tgz",
"integrity": "sha512-BTVEPgCC2WFfLEYlvf1jtDPI7imbY6G2gbi75ksLSt5e2noJbAOfNx2hu1lp+A19d+KO+X/gwzvYnytruDA37Q==",
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/rooks/-/rooks-5.7.1.tgz",
"integrity": "sha512-Gztycgm+e+bS0vqLMSGlGe8f7rkXMxjfPj3FucM06/xu1CEFQx1pZ0zMVdWVxDeMXRePaQ2/g1K7ArIlGKyHbQ==",
"requires": {
"lodash.debounce": "^4.0.8",
"raf": "^3.4.1"
@ -37903,9 +37903,9 @@
}
},
"typescript": {
"version": "4.3.5",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.3.5.tgz",
"integrity": "sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA=="
"version": "4.4.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.3.tgz",
"integrity": "sha512-4xfscpisVgqqDfPaJo5vkd+Qd/ItkoagnHpufr+i2QCHBsNYp+G7UAoyFl8aPtx879u38wPV65rZ8qbGZijalA=="
},
"unbox-primitive": {
"version": "1.0.1",

View File

@ -35,7 +35,7 @@
"react-select": "^4",
"react-table": "^7",
"recharts": "^2.0.8",
"rooks": "^5",
"rooks": "^5.7.1",
"sass": "^1",
"socket.io-client": "^4",
"typescript": "^4"

View File

@ -45,6 +45,7 @@ declare module "react-table" {
interface useSelectionProps<D extends Record<string, unknown>> {
isSelecting?: boolean;
onSelect?: (items: D[]) => void;
canSelect?: (item: D) => boolean;
}
interface useSelectionState<D extends Record<string, unknown>> {}

View File

@ -144,7 +144,11 @@ const SettingsGeneralView: FunctionComponent = () => {
</Input>
<Input name="Ignored Addresses">
<Chips settingKey="settings-proxy-exclude"></Chips>
<Message>'*.' as a wildcard for subdomains</Message>
<Message>
List of excluded domains or IP addresses. Asterisk(wildcard),
regex and CIDR are unsupported. You can use '.domain.com' to
include all subdomains.
</Message>
</Input>
</CollapseBox.Content>
</CollapseBox>

View File

@ -76,7 +76,7 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
description: "Hebrew Subtitles Provider",
defaultKey: {
email: "",
hashed_password: ""
hashed_password: "",
},
keyNameOverride: {
hashed_password: "Hashed Password",
@ -135,7 +135,17 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
use_hash: "Use Hash",
},
},
{ key: "podnapisi" },
{
key: "podnapisi",
name: "Podnapisi",
defaultKey: {
verify_ssl: true,
},
keyNameOverride: {
verify_ssl:
"Verify SSL certificate (disabling introduce a MitM attack risk)",
},
},
{
key: "regielive",
name: "RegieLive",
@ -177,6 +187,11 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
description: "Bulgarian Subtitles Provider",
},
{ key: "subsynchro", description: "French Subtitles Provider" },
{
key: "subtitrarinoi",
name: "Subtitrari-noi.ro",
description: "Romanian Subtitles Provider",
},
{
key: "subtitriid",
name: "subtitri.id.lv",
@ -196,13 +211,26 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
password: "",
},
},
{ key: "titrari", name: "Titrari.ro" },
{
key: "titrari",
name: "Titrari.ro",
description: "Mostly Romanian Subtitles Provider",
},
{
key: "tusubtitulo",
name: "Tusubtitulo.com",
description:
"LATAM Spanish / Spanish / English Subtitles Provider for TV Shows",
},
{
key: "titulky",
name: "Titulky.com",
description: "CZ/SK Subtitles Provider. Available only with VIP",
defaultKey: {
username: "",
password: "",
},
},
{ key: "tvsubtitles", name: "TVSubtitles" },
{ key: "wizdom", description: "Wizdom.xyz Subtitles Provider." },
{

View File

@ -295,6 +295,10 @@ const TranslateModal: FunctionComponent<BaseModalProps & ToolModalProps> = ({
const TaskGroupName = "Modifying Subtitles";
const CanSelectSubtitle = (item: TableColumnType) => {
return item.path.endsWith(".srt");
};
const STM: FunctionComponent<BaseModalProps> = ({ ...props }) => {
const payload = useModalPayload<SupportType[]>(props.modalKey);
const [selections, setSelections] = useState<TableColumnType[]>([]);
@ -458,6 +462,7 @@ const STM: FunctionComponent<BaseModalProps> = ({ ...props }) => {
plugins={plugins}
columns={columns}
onSelect={setSelections}
canSelect={CanSelectSubtitle}
data={data}
></SimpleTable>
</BaseModal>

View File

@ -18,22 +18,37 @@ const checkboxId = "---selection---";
interface CheckboxProps {
idIn: string;
disabled?: boolean;
}
const Checkbox = forwardRef<
HTMLInputElement,
TableToggleCommonProps & CheckboxProps
>(({ indeterminate, idIn, ...rest }, ref) => {
>(({ indeterminate, checked, disabled, idIn, ...rest }, ref) => {
const defaultRef = useRef<HTMLInputElement>(null);
const resolvedRef = ref || defaultRef;
useEffect(() => {
if (typeof resolvedRef === "object" && resolvedRef.current) {
resolvedRef.current.indeterminate = indeterminate ?? false;
}
}, [resolvedRef, indeterminate]);
return <Form.Check custom id={idIn} ref={resolvedRef} {...rest}></Form.Check>;
if (disabled) {
resolvedRef.current.checked = false;
} else {
resolvedRef.current.checked = checked ?? false;
}
}
}, [resolvedRef, indeterminate, checked, disabled]);
return (
<Form.Check
custom
disabled={disabled}
id={idIn}
ref={resolvedRef}
{...rest}
></Form.Check>
);
});
function useCustomSelection<T extends object>(hooks: Hooks<T>) {
@ -52,6 +67,7 @@ function useInstance<T extends object>(instance: TableInstance<T>) {
plugins,
rows,
onSelect,
canSelect,
isSelecting,
state: { selectedRowIds },
} = instance;
@ -61,12 +77,17 @@ function useInstance<T extends object>(instance: TableInstance<T>) {
useEffect(() => {
// Performance
if (isSelecting) {
const items = Object.keys(selectedRowIds).flatMap(
let items = Object.keys(selectedRowIds).flatMap(
(v) => rows.find((n) => n.id === v)?.original ?? []
);
if (canSelect) {
items = items.filter((v) => canSelect(v));
}
onSelect && onSelect(items);
}
}, [selectedRowIds, onSelect, rows, isSelecting]);
}, [selectedRowIds, onSelect, rows, isSelecting, canSelect]);
}
function visibleColumns<T extends object>(
@ -83,12 +104,17 @@ function visibleColumns<T extends object>(
{...getToggleAllRowsSelectedProps()}
></Checkbox>
),
Cell: ({ row }: CellProps<any>) => (
<Checkbox
idIn={`table-cell-${row.index}`}
{...row.getToggleRowSelectedProps()}
></Checkbox>
),
Cell: ({ row }: CellProps<any>) => {
const canSelect = instance.canSelect;
const disabled = (canSelect && !canSelect(row.original)) ?? false;
return (
<Checkbox
idIn={`table-cell-${row.index}`}
disabled={disabled}
{...row.getToggleRowSelectedProps()}
></Checkbox>
);
},
};
return [checkbox, ...columns.filter((v) => v.selectHide !== true)];
} else {

View File

@ -37,7 +37,7 @@ class Addic7edSubtitle(_Addic7edSubtitle):
download_link, uploader=None):
super(Addic7edSubtitle, self).__init__(language, hearing_impaired, page_link, series, season, episode,
title, year, version, download_link)
self.release_info = version.replace('+', ',')
self.release_info = version.replace('+', ',') if version else None
self.uploader = uploader
def get_matches(self, video):
@ -249,8 +249,12 @@ class Addic7edProvider(_Addic7edProvider):
for item in movies:
link = item.find('a', href=True)
if link:
type, media_id = link['href'].split('/')
if type == 'movie':
if link['href'].startswith('movie/'):
splitted_uri = link['href'].split('/')
if len(splitted_uri) == 2:
media_id = splitted_uri[1]
else:
continue
media_title = link.text
match = re.search(r'(.+)\s\((\d{4})\)$', media_title)
if match:
@ -492,7 +496,10 @@ class Addic7edProvider(_Addic7edProvider):
page_link = self.server_url + 'movie/' + movie_id
version_matches = re.search(r'Version\s(.+),.+', str(row1.contents[1].contents[1]))
version = version_matches.group(1) if version_matches else None
download_link = row2.contents[8].contents[2].attrs['href'][1:]
try:
download_link = row2.contents[8].contents[3].attrs['href'][1:]
except IndexError:
download_link = row2.contents[8].contents[2].attrs['href'][1:]
uploader = row1.contents[2].contents[8].text.strip()
# set subtitle language to hi if it's hearing_impaired

View File

@ -51,6 +51,15 @@ class KtuvitSubtitle(Subtitle):
self.subtitle_id = subtitle_id
self.release = release
def __repr__(self):
return "<%s [%s] %r [%s:%s]>" % (
self.__class__.__name__,
self.subtitle_id,
self.page_link,
self.language,
self._guessed_encoding,
)
@property
def id(self):
return str(self.subtitle_id)
@ -109,6 +118,7 @@ class KtuvitProvider(Provider):
request_download_id_url = "Services/ContentProvider.svc/RequestSubtitleDownload"
download_link = "Services/DownloadFile.ashx?DownloadIdentifier="
subtitle_class = KtuvitSubtitle
no_subtitle_str = 'אין כתוביות'
_tmdb_api_key = "a51ee051bcd762543373903de296e0a3"
@ -120,7 +130,7 @@ class KtuvitProvider(Provider):
self.hashed_password = hashed_password
self.logged_in = False
self.session = None
self.loginCookie = None
self.login_cookie = None
def initialize(self):
self.session = Session()
@ -131,12 +141,14 @@ class KtuvitProvider(Provider):
data = {"request": {"Email": self.email, "Password": self.hashed_password}}
self.session.headers['Accept-Encoding'] = 'gzip'
self.session.headers['Accept-Language'] = 'en-us,en;q=0.5'
self.session.headers['Pragma'] = 'no-cache'
self.session.headers['Cache-Control'] = 'no-cache'
self.session.headers['Content-Type'] = 'application/json'
self.session.headers['User-Agent']: os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")
self.session.headers["Accept-Encoding"] = "gzip"
self.session.headers["Accept-Language"] = "en-us,en;q=0.5"
self.session.headers["Pragma"] = "no-cache"
self.session.headers["Cache-Control"] = "no-cache"
self.session.headers["Content-Type"] = "application/json"
self.session.headers["User-Agent"]: os.environ.get(
"SZ_USER_AGENT", "Sub-Zero/2"
)
r = self.session.post(
self.server_url + self.sign_in_url,
@ -146,29 +158,36 @@ class KtuvitProvider(Provider):
)
if r.content:
is_success = False
try:
responseContent = r.json()
is_success = self.parse_d_response(
r, "IsSuccess", False, "Authentication to the provider"
)
except json.decoder.JSONDecodeError:
AuthenticationError("Unable to parse JSON return while authenticating to the provider.")
logger.info("Failed to Login to Ktuvit")
if not is_success:
error_message = ''
try:
error_message = self.parse_d_response(r, "ErrorMessage", "[None]")
except json.decode.JSONDecoderError:
raise AuthenticationError(
"Error Logging in to Ktuvit Provider: " + str(r.content)
)
raise AuthenticationError(
"Error Logging in to Ktuvit Provider: " + error_message
)
else:
isSuccess = False
if 'd' in responseContent:
responseContent = json.loads(responseContent['d'])
isSuccess = responseContent.get('IsSuccess', False)
if not isSuccess:
AuthenticationError("ErrorMessage: " + responseContent['d'].get("ErrorMessage", "[None]"))
else:
AuthenticationError("Incomplete JSON returned while authenticating to the provider.")
cookie_split = r.headers["set-cookie"].split("Login=")
if len(cookie_split) != 2:
self.logged_in = False
raise AuthenticationError(
"Login Failed, didn't receive valid cookie in response"
)
logger.debug("Logged in")
self.loginCookie = (
r.headers["set-cookie"][1].split(";")[0].replace("Login=", "")
)
self.login_cookie = cookie_split[1].split(";")[0]
logger.debug("Logged in with cookie: " + self.login_cookie)
self.session.headers["Accept"]="application/json, text/javascript, */*; q=0.01"
self.session.headers["Cookie"]="Login=" + self.loginCookie
self.logged_in = True
self.logged_in = True
def terminate(self):
self.session.close()
@ -226,6 +245,10 @@ class KtuvitProvider(Provider):
def query(
self, title, season=None, episode=None, year=None, filename=None, imdb_id=None
):
if not self.logged_in:
logger.info("Not logged in to Ktuvit. Returning 0 results")
return {}
# search for the IMDB ID if needed.
is_movie = not (season and episode)
imdb_id = imdb_id or self._search_imdb_id(title, year, is_movie)
@ -260,31 +283,21 @@ class KtuvitProvider(Provider):
logger.debug("Getting the list of subtitles")
url = self.server_url + self.search_url
r = self.session.post(
url, json={"request": query}, timeout=10
)
logger.debug("Calling URL: {} with request: {}".format(url, str({"request": query})))
r = self.session.post(url, json={"request": query}, timeout=10)
r.raise_for_status()
if r.content:
try:
responseContent = r.json()
except json.decoder.JSONDecodeError:
json.decoder.JSONDecodeError("Unable to parse JSON returned while getting Film/Series Information.")
else:
isSuccess = False
if 'd' in responseContent:
responseContent = json.loads(responseContent['d'])
results = responseContent.get('Films', [])
else:
json.decoder.JSONDecodeError("Incomplete JSON returned while getting Film/Series Information.")
results = self.parse_d_response(r, "Films", [], "Films/Series Information")
else:
return {}
return {}
# loop over results
subtitles = {}
for result in results:
imdb_link = result["IMDB_Link"]
imdb_link = imdb_link[0: -1] if imdb_link.endswith("/") else imdb_link
imdb_link = imdb_link[0:-1] if imdb_link.endswith("/") else imdb_link
results_imdb_id = imdb_link.split("/")[-1]
if results_imdb_id != imdb_id:
@ -305,6 +318,7 @@ class KtuvitProvider(Provider):
else:
subs = self._search_tvshow(ktuvit_id, season, episode)
logger.debug('Got {} Subs from Ktuvit'.format(len(subs)))
for sub in subs:
# otherwise create it
subtitle = KtuvitSubtitle(
@ -338,8 +352,16 @@ class KtuvitProvider(Provider):
r = self.session.get(url, timeout=10)
r.raise_for_status()
if len(r.content) < 10:
logger.debug("Too short content-length in response: [{}]. Treating as No Subtitles Found ".format(str(r.content)))
return []
sub_list = ParserBeautifulSoup(r.content, ["html.parser"])
sub_rows = sub_list.find_all("tr")
sub_rows = sub_list("tr")
if sub_list.find("tr") and sub_list.find("tr").find("td") and sub_list.find("tr").find("td").get_text() == self.no_subtitle_str:
logger.debug("No Subtitles Found. URL " + url)
return subs
for row in sub_rows:
columns = row.find_all("td")
@ -347,11 +369,14 @@ class KtuvitProvider(Provider):
for index, column in enumerate(columns):
if index == 0:
sub['rls'] = column.get_text().strip().split("\n")[0]
sub["rls"] = column.get_text().strip().split("\n")[0]
if index == 5:
sub['sub_id'] = column.find("input", attrs={"data-sub-id": True})["data-sub-id"]
sub["sub_id"] = column.find("input", attrs={"data-sub-id": True})[
"data-sub-id"
]
subs.append(sub)
if 'sub_id' in sub:
subs.append(sub)
return subs
def _search_movie(self, movie_id):
@ -360,28 +385,32 @@ class KtuvitProvider(Provider):
r = self.session.get(url, timeout=10)
r.raise_for_status()
if len(r.content) < 10:
logger.debug("Too short content-length in response: [{}]. Treating as No Subtitles Found ".format(str(r.content)))
return []
html = ParserBeautifulSoup(r.content, ["html.parser"])
sub_rows = html.select("table#subtitlesList tbody > tr")
for row in sub_rows:
columns = row.find_all("td")
sub = {
'id': movie_id
}
sub = {"id": movie_id}
for index, column in enumerate(columns):
if index == 0:
sub['rls'] = column.get_text().strip().split("\n")[0]
sub["rls"] = column.get_text().strip().split("\n")[0]
if index == 5:
sub['sub_id'] = column.find("a", attrs={"data-subtitle-id": True})["data-subtitle-id"]
sub["sub_id"] = column.find("a", attrs={"data-subtitle-id": True})[
"data-subtitle-id"
]
subs.append(sub)
if 'sub_id' in sub:
subs.append(sub)
return subs
def list_subtitles(self, video, languages):
season = episode = None
year = video.year
filename = video.name
imdb_id = video.imdb_id
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
@ -405,7 +434,7 @@ class KtuvitProvider(Provider):
def download_subtitle(self, subtitle):
if isinstance(subtitle, KtuvitSubtitle):
downloadIdentifierRequest = {
download_identifier_request = {
"FilmID": subtitle.ktuvit_id,
"SubtitleID": subtitle.subtitle_id,
"FontSize": 0,
@ -413,32 +442,22 @@ class KtuvitProvider(Provider):
"PredefinedLayout": -1,
}
logger.debug("Download Identifier Request data: " + str(json.dumps({"request": downloadIdentifierRequest})))
logger.debug(
"Download Identifier Request data: "
+ str(json.dumps({"request": download_identifier_request}))
)
# download
url = self.server_url + self.request_download_id_url
r = self.session.post(
url, json={"request": downloadIdentifierRequest}, timeout=10
url, json={"request": download_identifier_request}, timeout=10
)
r.raise_for_status()
if r.content:
try:
responseContent = r.json()
except json.decoder.JSONDecodeError:
json.decoder.JSONDecodeError("Unable to parse JSON returned while getting Download Identifier.")
else:
isSuccess = False
if 'd' in responseContent:
responseContent = json.loads(responseContent['d'])
downloadIdentifier = responseContent.get('DownloadIdentifier', None)
download_identifier = self.parse_d_response(r, "DownloadIdentifier")
if not downloadIdentifier:
json.decoder.JSONDecodeError("Missing Download Identifier.")
else:
json.decoder.JSONDecodeError("Incomplete JSON returned while getting Download Identifier.")
url = self.server_url + self.download_link + downloadIdentifier
url = self.server_url + self.download_link + download_identifier
r = self.session.get(url, timeout=10)
r.raise_for_status()
@ -450,3 +469,32 @@ class KtuvitProvider(Provider):
return
subtitle.content = fix_line_ending(r.content)
def parse_d_response(self, response, field, default_value=None, message=None):
message = message if message else field
try:
response_content = response.json()
except json.decoder.JSONDecodeError as ex:
raise json.decoder.JSONDecodeError(
"Unable to parse JSON returned while getting " + message, ex.doc, ex.pos
)
else:
# kept for manual debugging when needed:
# logger.debug("Parsing d response_content: " + str(response_content))
if "d" in response_content:
response_content = json.loads(response_content["d"])
value = response_content.get(field, default_value)
if not value and value != default_value:
raise json.decoder.JSONDecodeError(
"Missing " + message, str(response_content), 0
)
else:
raise json.decoder.JSONDecodeError(
"Incomplete JSON returned while getting " + message,
str(response_content),
0
)
return value

View File

@ -43,15 +43,17 @@ def fix_tv_naming(title):
def fix_movie_naming(title):
return fix_inconsistent_naming(title, {
}, True)
}, True)
class OpenSubtitlesComSubtitle(Subtitle):
provider_name = 'opensubtitlescom'
hash_verifiable = False
def __init__(self, language, hearing_impaired, page_link, file_id, releases, uploader, title, year,
def __init__(self, language, forced, hearing_impaired, page_link, file_id, releases, uploader, title, year,
hash_matched, hash=None, season=None, episode=None):
language = Language.rebuild(language, hi=hearing_impaired, forced=forced)
self.title = title
self.year = year
self.season = season
@ -60,6 +62,7 @@ class OpenSubtitlesComSubtitle(Subtitle):
self.release_info = releases
self.language = language
self.hearing_impaired = hearing_impaired
self.forced = forced
self.file_id = file_id
self.page_link = page_link
self.download_link = None
@ -177,21 +180,28 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
else:
raise ProviderError('Bad status code: {}'.format(r.status_code))
@staticmethod
def sanitize_external_ids(external_id):
if isinstance(external_id, str):
external_id = external_id.lower().lstrip('tt')
sanitized_id = external_id[:-1].lstrip('0') + external_id[-1]
return int(sanitized_id)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def search_titles(self, title):
title_id = None
imdb_id = None
if isinstance(self.video, Episode) and self.video.series_imdb_id:
imdb_id = self.video.series_imdb_id
imdb_id = self.sanitize_external_ids(self.video.series_imdb_id)
elif isinstance(self.video, Movie) and self.video.imdb_id:
imdb_id = self.video.imdb_id
imdb_id = self.sanitize_external_ids(self.video.imdb_id)
if imdb_id:
parameters = {'imdb_id': imdb_id}
logging.debug('Searching using this IMDB id: {}'.format(imdb_id))
else:
parameters = {'query': title}
parameters = {'query': title.lower()}
logging.debug('Searching using this title: {}'.format(title))
results = self.session.get(self.server_url + 'features', params=parameters, timeout=30)
@ -227,7 +237,7 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
if title_id:
logging.debug('Found this title ID: {}'.format(title_id))
return title_id
return self.sanitize_external_ids(title_id)
finally:
if not title_id:
logger.debug('No match found for {}'.format(title))
@ -248,24 +258,37 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
title_id = self.search_titles(title)
if not title_id:
return []
lang_strings = [str(lang) for lang in languages]
lang_strings = [str(lang.basename) for lang in languages]
only_foreign = all([lang.forced for lang in languages])
also_foreign = any([lang.forced for lang in languages])
if only_foreign:
forced = 'only'
elif also_foreign:
forced = 'include'
else:
forced = 'exclude'
langs = ','.join(lang_strings)
logging.debug('Searching for this languages: {}'.format(lang_strings))
# query the server
if isinstance(self.video, Episode):
res = self.session.get(self.server_url + 'subtitles',
params={'parent_feature_id': title_id,
'languages': langs,
'episode_number': self.video.episode,
'season_number': self.video.season,
'moviehash': hash},
params=(('episode_number', self.video.episode),
('foreign_parts_only', forced),
('languages', langs.lower()),
('moviehash', hash),
('parent_feature_id', title_id),
('season_number', self.video.season),
('query', os.path.basename(self.video.name))),
timeout=30)
else:
res = self.session.get(self.server_url + 'subtitles',
params={'id': title_id,
'languages': langs,
'moviehash': hash},
params=(('foreign_parts_only', forced),
('id', title_id),
('languages', langs.lower()),
('moviehash', hash),
('query', os.path.basename(self.video.name))),
timeout=30)
if res.status_code == 429:
@ -278,6 +301,8 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
try:
result = res.json()
if 'data' not in result:
raise ValueError
except ValueError:
raise ProviderError('Invalid JSON returned by provider')
else:
@ -302,18 +327,19 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
if len(item['attributes']['files']):
subtitle = OpenSubtitlesComSubtitle(
language=Language.fromietf(item['attributes']['language']),
hearing_impaired=item['attributes']['hearing_impaired'],
page_link=item['attributes']['url'],
file_id=item['attributes']['files'][0]['file_id'],
releases=item['attributes']['release'],
uploader=item['attributes']['uploader']['name'],
title=item['attributes']['feature_details']['movie_name'],
year=item['attributes']['feature_details']['year'],
season=season_number,
episode=episode_number,
hash_matched=moviehash_match
)
language=Language.fromietf(item['attributes']['language']),
forced=item['attributes']['foreign_parts_only'],
hearing_impaired=item['attributes']['hearing_impaired'],
page_link=item['attributes']['url'],
file_id=item['attributes']['files'][0]['file_id'],
releases=item['attributes']['release'],
uploader=item['attributes']['uploader']['name'],
title=item['attributes']['feature_details']['movie_name'],
year=item['attributes']['feature_details']['year'],
season=season_number,
episode=episode_number,
hash_matched=moviehash_match
)
subtitle.get_matches(self.video)
subtitles.append(subtitle)

View File

@ -109,10 +109,12 @@ class PodnapisiSubtitle(_PodnapisiSubtitle):
return matches
class PodnapisiAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
ctx = ssl.create_default_context()
ctx.set_ciphers('DEFAULT@SECLEVEL=1')
ctx.check_hostname = False
self.poolmanager = poolmanager.PoolManager(
num_pools=connections,
maxsize=maxsize,
@ -121,6 +123,7 @@ class PodnapisiAdapter(HTTPAdapter):
ssl_context=ctx
)
class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
languages = ({Language('por', 'BR'), Language('srp', script='Latn'), Language('srp', script='Cyrl')} |
{Language.fromalpha2(l) for l in language_converters['alpha2'].codes})
@ -130,12 +133,14 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
server_url = 'https://podnapisi.net/subtitles/'
only_foreign = False
also_foreign = False
verify_ssl = True
subtitle_class = PodnapisiSubtitle
hearing_impaired_verifiable = True
def __init__(self, only_foreign=False, also_foreign=False):
def __init__(self, only_foreign=False, also_foreign=False, verify_ssl=True):
self.only_foreign = only_foreign
self.also_foreign = also_foreign
self.verify_ssl = verify_ssl
if only_foreign:
logger.info("Only searching for foreign/forced subtitles")
@ -145,6 +150,7 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
def initialize(self):
super().initialize()
self.session.mount('https://', PodnapisiAdapter())
self.session.verify = self.verify_ssl
def list_subtitles(self, video, languages):
if video.is_special:

View File

@ -0,0 +1,321 @@
# coding=utf-8
from __future__ import absolute_import

import io
import logging
import os
import re
from random import randint
from zipfile import ZipFile, is_zipfile

import rarfile
from guessit import guessit
from rarfile import RarFile, is_rarfile
from requests import Session
from subliminal.exceptions import ProviderError
from subliminal.providers import ParserBeautifulSoup
from subliminal.subtitle import SUBTITLE_EXTENSIONS
from subliminal.video import Episode, Movie
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
from subzero.language import Language

from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
# parsing regex definitions
# Matches "<title> AKA <alternate title>" into named groups 'title' and 'altitle'.
# NOTE(review): this pattern is not referenced anywhere else in this module —
# confirm it is still needed before removing.
title_re = re.compile(r'(?P<title>(?:.+(?= [Aa][Kk][Aa] ))|.+)(?:(?:.+)(?P<altitle>(?<= [Aa][Kk][Aa] ).+))?')
def fix_inconsistent_naming(title):
    """Sanitize *title* and remap shows whose site naming differs from the library's.

    :param str title: original title.
    :return: normalized title.
    :rtype: str
    """
    # Known series whose on-site name does not match the name used by the indexer.
    replacements = {
        "DC's Legends of Tomorrow": "Legends of Tomorrow",
        "Marvel's Jessica Jones": "Jessica Jones",
    }
    return _fix_inconsistent_naming(title, replacements)
# Module-level logger for this provider.
logger = logging.getLogger(__name__)

# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile`
rarfile.PATH_SEP = '/'
class SubtitrarinoiSubtitle(Subtitle):
    """A subtitle hosted on subtitrari-noi.ro."""

    provider_name = 'subtitrarinoi'

    def __init__(self, language, download_link, sid, comments, title, imdb_id, uploader, page_link, year=None,
                 download_count=None, is_episode=False, desired_episode=False):
        super(SubtitrarinoiSubtitle, self).__init__(language)
        self.sid = sid
        self.title = title
        self.imdb_id = imdb_id
        self.download_link = download_link
        self.year = year
        self.download_count = download_count
        # The site separates release notes with ';' — normalise to comma-separated
        # and expose the same string through all three attributes.
        normalized = ",".join(comments.split(";"))
        self.comments = normalized
        self.releases = normalized
        self.release_info = normalized
        self.matches = None
        self.uploader = uploader
        self.page_link = page_link
        self.is_episode = is_episode
        self.desired_episode = desired_episode

    @property
    def id(self):
        # Row index on the search-results page, assigned by the provider.
        return self.sid

    def __str__(self):
        return f"{self.title}({self.year}) -> {self.download_link}"

    def __repr__(self):
        return f"{self.title}({self.year})"

    def get_matches(self, video):
        """Return the set of attributes of *video* this subtitle matches."""
        found = set()

        if video.year and self.year == video.year:
            found.add('year')
        if video.release_group and video.release_group in self.comments:
            found.add('release_group')

        if isinstance(video, Movie):
            # title
            if video.title and sanitize(self.title) == fix_inconsistent_naming(video.title):
                found.add('title')
            # imdb
            if video.imdb_id and self.imdb_id == video.imdb_id:
                found.add('imdb_id')
            # guess match others
            found |= guess_matches(video, guessit(self.comments, {"type": "movie"}))
        else:
            # title: strip the trailing " - Sezonul N" suffix before comparing series names
            seasonless = re.sub(r'\s-\sSezonul\s\d+$', '', self.title.rstrip())
            if video.series and fix_inconsistent_naming(video.series) == sanitize(seasonless):
                found.add('series')
            # imdb
            if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
                found.add('imdb_id')
            # season
            if f"Sezonul {video.season}" in self.comments:
                found.add('season')
            # episode: imdb id + season together are taken as an episode-level match
            if {"imdb_id", "season"}.issubset(found):
                found.add('episode')
            # guess match others
            found |= guess_matches(video, guessit(self.comments, {"type": "episode"}))

        self.matches = found
        return found
class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin):
    """Provider for subtitrari-noi.ro (Romanian subtitles)."""

    subtitle_class = SubtitrarinoiSubtitle
    languages = {Language(lang) for lang in ['ron']}
    languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
    server_url = 'https://www.subtitrari-noi.ro/'
    api_url = server_url + 'paginare_filme.php'

    def __init__(self):
        self.session = None

    def initialize(self):
        self.session = Session()
        # Hardcoded UA; the search endpoint is an AJAX handler, so it also expects
        # the XMLHttpRequest header and a same-site Referer.
        self.session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4535.2 Safari/537.36'
        self.session.headers['X-Requested-With'] = 'XMLHttpRequest'
        self.session.headers['Referer'] = self.server_url

    def terminate(self):
        self.session.close()

    def query(self, languages=None, title=None, imdb_id=None, video=None):
        """Search the site and return subtitles ordered by download count.

        :param languages: set of :class:`Language`; the first one is attached to results.
        :param title: fallback search string when no imdb id is available.
        :param imdb_id: bare imdb id (without the 'tt' prefix) to search by.
        :param video: the :class:`Video` being searched for (Movie or Episode).
        """
        subtitles = []

        params = self.getQueryParams(imdb_id, title)

        search_response = self.session.post(self.api_url, data=params, timeout=15)
        search_response.raise_for_status()

        soup = ParserBeautifulSoup(search_response.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])

        # loop over subtitle cells
        rows = soup.select('div[id="round"]')
        if len(rows) == 0:
            logger.debug('No data returned from provider')
            return []

        # release comments live outside the parent element of the sub details,
        # so map them to a parallel list and pair by index
        comment_rows = soup.findAll('div', attrs={'class': None, 'id': None, 'align': None})

        for index, row in enumerate(rows):
            result_anchor_el = row.select_one('.buton').select('a')

            # Download link
            href = result_anchor_el[0]['href']
            download_link = self.server_url + href

            fullTitle = row.select_one('#content-main a').text

            # Get title (on failure, keep the query title as a fallback)
            try:
                title = fullTitle.split("(")[0]
            except Exception:
                logger.error("Error parsing title")

            # Get uploader — default to None so a parse failure cannot raise
            # NameError when the value is used below
            uploader = None
            try:
                uploader = row.select('#content-main p')[4].text[10:]
            except Exception:
                logger.error("Error parsing uploader")

            # Get downloads count
            downloads = 0
            try:
                downloads = int(row.select_one('#content-right p').text[12:])
            except Exception:
                logger.error("Error parsing downloads")

            # Get year
            year = None
            try:
                year = int(fullTitle.split("(")[1].split(")")[0])
            except Exception:
                logger.error("Error parsing year")

            # Get imdbId
            sub_imdb_id = self.getImdbIdFromSubtitle(row)

            comments = ''
            try:
                comments = comment_rows[index].text
                logger.debug('Comments: {}'.format(comments))
            except Exception:
                logger.error("Error parsing comments")

            # Get page link — default to None so a parse failure cannot raise
            # NameError when constructing the subtitle
            page_link = None
            try:
                page_link = row.select_one('#content-main a')['href']
            except Exception:
                logger.error("Error parsing page_link")

            episode_number = video.episode if isinstance(video, Episode) else None
            subtitle = self.subtitle_class(next(iter(languages)), download_link, index, comments, title,
                                           sub_imdb_id, uploader, page_link, year, downloads,
                                           isinstance(video, Episode), episode_number)
            logger.debug('Found subtitle %r', str(subtitle))
            subtitles.append(subtitle)

        ordered_subs = self.order(subtitles)

        return ordered_subs

    @staticmethod
    def order(subtitles):
        """Return *subtitles* sorted by download count, most-downloaded first."""
        logger.debug("Sorting by download count...")
        sorted_subs = sorted(subtitles, key=lambda s: s.download_count, reverse=True)
        return sorted_subs

    @staticmethod
    def getImdbIdFromSubtitle(row):
        """Extract the 'tt…' imdb id linked from a result row, or None on failure."""
        imdbId = None
        try:
            imdbId = row.select('div[id=content-right] a')[-1].find_all(src=re.compile("imdb"))[0].parent.get('href').split("tt")[-1]
        except Exception:
            logger.error("Error parsing imdb id")
        if imdbId is not None:
            return "tt" + imdbId
        else:
            return None

    # subtitrari-noi.ro params
    # info: there seems to be no way to do an advanced search by imdb_id or title
    # the page seems to populate both "search_q" and "cautare" with the same value
    # search_q = ?
    # cautare = search string
    # tip = type of search (0: premiere - doesn't return anything, 1: series only, 2: both, I think, not sure on that)
    # an = year
    # gen = genre
    def getQueryParams(self, imdb_id, title):
        """Build the POST form for the site's paginated search endpoint.

        Prefers *imdb_id* over *title*; falls back to an empty search string
        when neither is given (instead of raising KeyError).
        """
        queryParams = {
            'search_q': '1',
            'cautare': '',
            'tip': '2',
            'an': 'Toti anii',
            'gen': 'Toate',
        }
        if imdb_id is not None:
            queryParams["cautare"] = imdb_id
        elif title is not None:
            queryParams["cautare"] = title

        queryParams["query_q"] = queryParams["cautare"]

        return queryParams

    def list_subtitles(self, video, languages):
        title = fix_inconsistent_naming(video.title)
        imdb_id = None
        try:
            if isinstance(video, Episode):
                imdb_id = video.series_imdb_id[2:]
            else:
                imdb_id = video.imdb_id[2:]
        except Exception:
            logger.error('Error parsing imdb_id from video object {}'.format(str(video)))

        subtitles = [s for s in
                     self.query(languages, title, imdb_id, video)]
        return subtitles

    def download_subtitle(self, subtitle):
        r = self.session.get(subtitle.download_link, headers={'Referer': self.api_url}, timeout=10)
        r.raise_for_status()

        # open the archive; bare (non-archived) subtitle files are accepted as-is
        archive_stream = io.BytesIO(r.content)
        if is_rarfile(archive_stream):
            logger.debug('Archive identified as rar')
            archive = RarFile(archive_stream)
        elif is_zipfile(archive_stream):
            logger.debug('Archive identified as zip')
            archive = ZipFile(archive_stream)
        else:
            subtitle.content = r.content
            if subtitle.is_valid():
                return
            subtitle.content = None

            raise ProviderError('Unidentified archive type')

        if subtitle.is_episode:
            subtitle.content = self._get_subtitle_from_archive(subtitle, archive)
        else:
            subtitle.content = self.get_subtitle_from_archive(subtitle, archive)

    @staticmethod
    def _get_subtitle_from_archive(subtitle, archive):
        """Return the archive member matching the desired episode, or None.

        Uses ``guessit`` on each member name; guessit may report 'episode' as a
        single int, a list (multi-episode file), or omit it entirely, so all
        three cases are handled instead of indexing ['episode'] directly
        (which raised KeyError for members with no episode number).
        """
        for name in archive.namelist():
            # discard hidden files
            if os.path.split(name)[-1].startswith('.'):
                continue

            # discard non-subtitle files
            if not name.lower().endswith(SUBTITLE_EXTENSIONS):
                continue

            guessed_episode = guessit(name).get('episode')
            if guessed_episode is None:
                continue
            if isinstance(guessed_episode, list):
                if subtitle.desired_episode in guessed_episode:
                    return archive.read(name)
            elif subtitle.desired_episode == guessed_episode:
                return archive.read(name)

        return None
# vim: set expandtab ts=4 sw=4:

View File

@ -43,20 +43,26 @@ logger = logging.getLogger(__name__)
# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile`
rarfile.PATH_SEP = '/'
class TitrariSubtitle(Subtitle):
provider_name = 'titrari'
def __init__(self, language, download_link, sid, releases, title, imdb_id, year=None, download_count=None, comments=None):
def __init__(self, language, download_link, sid, comments, title, imdb_id, page_link, uploader, year=None,
download_count=None, is_episode=False, desired_episode=None):
super(TitrariSubtitle, self).__init__(language)
self.sid = sid
self.title = title
self.imdb_id = imdb_id
self.download_link = download_link
self.page_link = page_link
self.uploader = uploader
self.year = year
self.download_count = download_count
self.releases = self.release_info = releases
self.comments = comments
self.comments = self.releases = self.release_info = comments
self.matches = None
self.is_episode = is_episode
self.desired_episode = desired_episode
@property
def id(self):
@ -71,22 +77,45 @@ class TitrariSubtitle(Subtitle):
def get_matches(self, video):
matches = set()
if video.year and self.year == video.year:
matches.add('year')
if video.release_group and video.release_group in self.comments:
matches.add('release_group')
if isinstance(video, Movie):
# title
if video.title and sanitize(self.title) == fix_inconsistent_naming(video.title):
matches.add('title')
if video.year and self.year == video.year:
matches.add('year')
# imdb
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
if video.release_group and video.release_group in self.comments:
matches.add('release_group')
# guess match others
matches |= guess_matches(video, guessit(self.comments, {"type": "movie"}))
else:
# title
seasonless_title = re.sub(r'\s-\sSezonul\s\d+$', '', self.title.rstrip())
if video.series and fix_inconsistent_naming(video.series) == sanitize(seasonless_title):
matches.add('series')
# imdb
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('imdb_id')
# season
if f"Sezonul {video.season}" in self.title:
matches.add('season')
# episode
if {"imdb_id", "season"}.issubset(matches):
matches.add('episode')
# guess match others
matches |= guess_matches(video, guessit(self.comments, {"type": "episode"}))
self.matches = matches
return matches
@ -94,37 +123,40 @@ class TitrariSubtitle(Subtitle):
class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
subtitle_class = TitrariSubtitle
languages = {Language(l) for l in ['ron', 'eng']}
languages.update(set(Language.rebuild(l, forced=True) for l in languages))
languages = {Language(lang) for lang in ['ron', 'eng']}
languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
api_url = 'https://www.titrari.ro/'
query_advanced_search = 'cautarenedevansata'
query_advanced_search = 'cautarepreaavansata'
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
# Hardcoding the UA to bypass the 30s throttle that titrari.ro uses for IP/UA pair
self.session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, ' \
'like Gecko) Chrome/93.0.4535.2 Safari/537.36'
# self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
def terminate(self):
self.session.close()
def query(self, languages=None, title=None, imdb_id=None, video=None):
def query(self, language=None, title=None, imdb_id=None, video=None):
subtitles = []
params = self.getQueryParams(imdb_id, title)
params = self.getQueryParams(imdb_id, title, language)
search_response = self.session.get(self.api_url, params=params, timeout=15)
search_response.raise_for_status()
if not search_response.content:
logger.debug('[#### Provider: titrari.ro] No data returned from provider')
logger.debug('No data returned from provider')
return []
soup = ParserBeautifulSoup(search_response.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
# loop over subtitle cells
rows = soup.select('td[rowspan=\'5\']')
rows = soup.select('td[rowspan="5"]')
for index, row in enumerate(rows):
result_anchor_el = row.select_one('a')
@ -132,81 +164,126 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
href = result_anchor_el.get('href')
download_link = self.api_url + href
fullTitle = row.parent.find("h1").find("a").text
fullTitle = row.parent.select('h1 a')[0].text
#Get title
# Get title
try:
title = fullTitle.split("(")[0]
except:
logger.error("[#### Provider: titrari.ro] Error parsing title.")
logger.error("Error parsing title")
# Get downloads count
downloads = 0
try:
downloads = int(row.parent.parent.select("span")[index].text[12:])
downloads = int(row.parent.parent.select('span')[index].text[12:])
except:
logger.error("[#### Provider: titrari.ro] Error parsing downloads.")
logger.error("Error parsing downloads")
# Get year
try:
year = int(fullTitle.split("(")[1].split(")")[0])
except:
year = None
logger.error("[#### Provider: titrari.ro] Error parsing year.")
logger.error("Error parsing year")
# Get imdbId
sub_imdb_id = self.getImdbIdFromSubtitle(row)
comments = ''
try:
comments = row.parent.parent.find_all("td", class_=re.compile("comment"))[index*2+1].text
comments = row.parent.parent.select('.comment')[1].text
except:
logger.error("Error parsing comments.")
logger.error("Error parsing comments")
subtitle = self.subtitle_class(next(iter(languages)), download_link, index, None, title, sub_imdb_id, year, downloads, comments)
logger.debug('[#### Provider: titrari.ro] Found subtitle %r', str(subtitle))
# Get page_link
try:
page_link = self.api_url + row.parent.select('h1 a')[0].get('href')
except:
logger.error("Error parsing page_link")
# Get uploader
try:
uploader = row.parent.select('td.row1.stanga a')[-1].text
except:
logger.error("Error parsing uploader")
episode_number = video.episode if isinstance(video, Episode) else None
subtitle = self.subtitle_class(language, download_link, index, comments, title, sub_imdb_id, page_link, uploader,
year, downloads, isinstance(video, Episode), episode_number)
logger.debug('Found subtitle %r', str(subtitle))
subtitles.append(subtitle)
ordered_subs = self.order(subtitles, video)
ordered_subs = self.order(subtitles)
return ordered_subs
def order(self, subtitles, video):
logger.debug("[#### Provider: titrari.ro] Sorting by download count...")
@staticmethod
def order(subtitles):
logger.debug("Sorting by download count...")
sorted_subs = sorted(subtitles, key=lambda s: s.download_count, reverse=True)
return sorted_subs
def getImdbIdFromSubtitle(self, row):
@staticmethod
def getImdbIdFromSubtitle(row):
imdbId = None
try:
imdbId = row.parent.parent.find_all(src=re.compile("imdb"))[0].parent.get('href').split("tt")[-1]
except:
logger.error("[#### Provider: titrari.ro] Error parsing imdbId.")
logger.error("Error parsing imdb id")
if imdbId is not None:
return "tt" + imdbId
else:
return None
def getQueryParams(self, imdb_id, title):
# titrari.ro seems to require all parameters now
# z2 = comment (empty)
# z3 = fps (-1: any, 0: N/A, 1: 23.97 FPS etc.)
# z4 = CD count (-1: any)
# z5 = imdb_id (empty or integer)
# z6 = sort order (0: unsorted, 1: by date, 2: by name)
# z7 = title (empty or string)
# z8 = language (-1: all, 1: ron, 2: eng)
# z9 = genre (All: all, Action: action etc.)
# z11 = type (0: any, 1: movie, 2: series)
def getQueryParams(self, imdb_id, title, language):
queryParams = {
'page': self.query_advanced_search,
'z8': '1'
'z7': '',
'z2': '',
'z5': '',
'z3': '-1',
'z4': '-1',
'z8': '-1',
'z9': 'All',
'z11': '0',
'z6': '0'
}
if imdb_id is not None:
queryParams["z5"] = imdb_id
elif title is not None:
queryParams["z7"] = title
if language == 'ro':
queryParams["z8"] = '1'
elif language == 'en':
queryParams["z8"] = '2'
return queryParams
def list_subtitles(self, video, languages):
title = fix_inconsistent_naming(video.title)
imdb_id = None
try:
imdb_id = video.imdb_id[2:]
if isinstance(video, Episode):
imdb_id = video.series_imdb_id[2:]
else:
imdb_id = video.imdb_id[2:]
except:
logger.error("[#### Provider: titrari.ro] Error parsing video.imdb_id.")
logger.error('Error parsing imdb_id from video object {}'.format(str(video)))
return [s for s in
self.query(languages, title, imdb_id, video)]
subtitles = [s for lang in languages for s in
self.query(lang, title, imdb_id, video)]
return subtitles
def download_subtitle(self, subtitle):
r = self.session.get(subtitle.download_link, headers={'Referer': self.api_url}, timeout=10)
@ -215,10 +292,10 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
# open the archive
archive_stream = io.BytesIO(r.content)
if is_rarfile(archive_stream):
logger.debug('[#### Provider: titrari.ro] Archive identified as rar')
logger.debug('Archive identified as RAR')
archive = RarFile(archive_stream)
elif is_zipfile(archive_stream):
logger.debug('[#### Provider: titrari.ro] Archive identified as zip')
logger.debug('Archive identified as ZIP')
archive = ZipFile(archive_stream)
else:
subtitle.content = r.content
@ -226,23 +303,28 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
return
subtitle.content = None
raise ProviderError('[#### Provider: titrari.ro] Unidentified archive type')
raise ProviderError('Unidentified archive type')
subtitle.releases = _get_releases_from_archive(archive)
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
if subtitle.is_episode:
subtitle.content = self._get_subtitle_from_archive(subtitle, archive)
else:
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
@staticmethod
def _get_subtitle_from_archive(subtitle, archive):
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
def _get_releases_from_archive(archive):
releases = []
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
_guess = guessit(name)
if subtitle.desired_episode == _guess['episode']:
return archive.read(name)
releases.append(os.path.splitext(os.path.split(name)[1])[0])
return None
return releases
# vim: set expandtab ts=4 sw=4: