[youtube:playlist] Use an iterator for the entries (closes #5935)

So that '--playlist-end' downloads only the required pages.
This commit is contained in:
Jaime Marquínez Ferrándiz 2015-06-09 23:49:11 +02:00
parent bd5bc0cd5a
commit 70219b0f43
1 changed file with 23 additions and 24 deletions

View File

@@ -1290,7 +1290,6 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     def _extract_playlist(self, playlist_id):
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
-        more_widget_html = content_html = page

         for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
             match = match.strip()
@@ -1310,36 +1309,36 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
             self.report_warning('Youtube gives an alert message: ' + match)

         # Extract the video ids from the playlist pages
-        ids = []
-
-        for page_num in itertools.count(1):
-            matches = re.finditer(self._VIDEO_RE, content_html)
-            # We remove the duplicates and the link with index 0
-            # (it's not the first video of the playlist)
-            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
-            ids.extend(new_ids)
-
-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
-            if not mobj:
-                break
-
-            more = self._download_json(
-                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
-                'Downloading page #%s' % page_num,
-                transform_source=uppercase_escape)
-            content_html = more['content_html']
-            if not content_html.strip():
-                # Some webpages show a "Load more" button but they don't
-                # have more videos
-                break
-            more_widget_html = more['load_more_widget_html']
+        def _entries():
+            more_widget_html = content_html = page
+            for page_num in itertools.count(1):
+                matches = re.finditer(self._VIDEO_RE, content_html)
+                # We remove the duplicates and the link with index 0
+                # (it's not the first video of the playlist)
+                new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
+                for vid_id in new_ids:
+                    yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
+
+                mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+                if not mobj:
+                    break
+
+                more = self._download_json(
+                    'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+                    'Downloading page #%s' % page_num,
+                    transform_source=uppercase_escape)
+                content_html = more['content_html']
+                if not content_html.strip():
+                    # Some webpages show a "Load more" button but they don't
+                    # have more videos
+                    break
+                more_widget_html = more['load_more_widget_html']

         playlist_title = self._html_search_regex(
             r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
             page, 'title')
-        url_results = self._ids_to_results(ids)
-        return self.playlist_result(url_results, playlist_id, playlist_title)
+
+        return self.playlist_result(_entries(), playlist_id, playlist_title)

     def _real_extract(self, url):
         # Extract playlist id