Mirror of https://github.com/blackjack4494/yt-dlc.git (synced 2024-12-22 15:57:23 +00:00)
[teamcoco] Rewrite preload data extraction
Idea: "puncture" some consecutive fragments and check whether the b64decode result of a punctured string is a valid JSON or not. It's a O(N^3) algorithm, but should be fast for a small N (less than 30 fragments in all test cases)
parent 1ae72fb23d
commit 3a105f7b20
1 changed file with 26 additions and 27 deletions
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import base64
 import binascii
 import re
+import json
 
 from .common import InfoExtractor
 from ..utils import (
@@ -68,41 +69,39 @@ class TeamcocoIE(InfoExtractor):
         video_id = self._html_search_regex(
             self._VIDEO_ID_REGEXES, webpage, 'video id')
 
-        data = preload = None
-        preloads = re.findall(r'"preload":\s*"([^"]+)"', webpage)
-        if preloads:
-            preload = max([(len(p), p) for p in preloads])[1]
+        data = None
 
-        if not preload:
-            preload = ''.join(re.findall(r'this\.push\("([^"]+)"\);', webpage))
+        preload_codes = self._html_search_regex(
+            r'(function.+)setTimeout\(function\(\)\{playlist',
+            webpage, 'preload codes')
+        base64_fragments = re.findall(r'"([a-zA-z0-9+/=]+)"', preload_codes)
+        base64_fragments.remove('init')
 
-        if not preload:
-            preload = self._html_search_regex([
-                r'player,\[?"([^"]+)"\]?', r'player.init\(\[?"([^"]+)"\]?\)'
-            ], webpage.replace('","', ''), 'preload data', default=None)
-
-        if not preload:
-            preload_codes = self._html_search_regex(
-                r'(function.+)setTimeout\(function\(\)\{playlist',
-                webpage, 'preload codes')
-            base64_fragments = re.findall(r'"([a-zA-z0-9+/=]+)"', preload_codes)
-            base64_fragments.remove('init')
-            for i in range(len(base64_fragments)):
-                cur_sequence = (''.join(base64_fragments[i:] + base64_fragments[:i])).encode('ascii')
+        def _check_sequence(cur_fragments):
+            if not cur_fragments:
+                return
+            for i in range(len(cur_fragments)):
+                cur_sequence = (''.join(cur_fragments[i:] + cur_fragments[:i])).encode('ascii')
                 try:
                     raw_data = base64.b64decode(cur_sequence)
-                except (TypeError, binascii.Error):
+                    if compat_ord(raw_data[0]) == compat_ord('{'):
+                        return json.loads(raw_data.decode('utf-8'))
+                except (TypeError, binascii.Error, UnicodeDecodeError, ValueError):
                     continue
-                if compat_ord(raw_data[0]) == compat_ord('{'):
-                    data = self._parse_json(raw_data.decode('utf-8'), video_id, fatal=False)
 
-        if not preload and not data:
-            raise ExtractorError(
-                'Preload information could not be extracted', expected=True)
+        def _check_data():
+            for i in range(len(base64_fragments) + 1):
+                for j in range(i, len(base64_fragments) + 1):
+                    data = _check_sequence(base64_fragments[:i] + base64_fragments[j:])
+                    if data:
+                        return data
+
+        self.to_screen('Try to compute possible data sequence. This may take some time.')
+        data = _check_data()
 
         if not data:
-            data = self._parse_json(
-                base64.b64decode(preload.encode('ascii')).decode('utf-8'), video_id)
+            raise ExtractorError(
+                'Preload information could not be extracted', expected=True)
 
         formats = []
         get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
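For reference, the fragment-harvesting step that feeds that search can be exercised on its own. The webpage string below is a made-up stand-in for the real Team Coco markup; only the two regexes are taken from the patch:

import re

# Hypothetical stand-in for the downloaded page: the real markup inlines a
# much larger script that pushes the base64 pieces before scheduling the
# playlist setup via setTimeout.
webpage = (
    '<script>!function(){var f=function(d){d.push("init");'
    'd.push("eyJ0aXRsZSI6");d.push("IkNvbmFuIn0=")};'
    'setTimeout(function(){playlist()},0)}()</script>'
)

# Step 1: isolate the player code in front of setTimeout(function(){playlist
# (the patch's 'preload codes' regex).
preload_codes = re.search(
    r'(function.+)setTimeout\(function\(\)\{playlist', webpage).group(1)

# Step 2: collect every quoted base64-looking token and drop the 'init' marker.
base64_fragments = re.findall(r'"([a-zA-z0-9+/=]+)"', preload_codes)
base64_fragments.remove('init')
print(base64_fragments)  # ['eyJ0aXRsZSI6', 'IkNvbmFuIn0=']

The harvested list deliberately over-matches (any quoted run of base64-alphabet characters is kept), which is why the puncture search has to weed the junk tokens back out afterwards.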