Mirror of https://github.com/blackjack4494/yt-dlc.git
fixed PEP8 whitespace issues
mostly vertical whitespace and mixed spaces and tabs
This commit is contained in:
parent 33d507f1fe
commit c0a10ca8dc

1 changed file with 55 additions and 33 deletions:
youtube-dl (88 changed lines)
@@ -47,6 +47,7 @@ std_headers = {

 simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')

+
 def preferredencoding():
     """Get preferred encoding.

@@ -63,6 +64,7 @@ def preferredencoding():
             yield pref
     return yield_preferredencoding().next()

+
 def htmlentity_transform(matchobj):
     """Transforms an HTML entity to a Unicode character.

@@ -89,11 +91,13 @@ def htmlentity_transform(matchobj):
     # Unknown entity in name, return its literal representation
     return (u'&%s;' % entity)

+
 def sanitize_title(utitle):
     """Sanitizes a video title so it could be used as part of a filename."""
     utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
     return utitle.replace(unicode(os.sep), u'%')

+
 def sanitize_open(filename, open_mode):
     """Try to open the given filename, and slightly tweak it if this fails.

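For context, a quick usage sketch of the sanitizer touched above (illustration only, not part of the diff; it assumes the file's htmlentity_transform is in scope and a POSIX os.sep of '/'):

    title = u'Foo &amp; Bar/Baz'
    safe = sanitize_title(title)
    print(safe)  # Foo & Bar%Baz -- the entity is decoded, the path separator becomes '%'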
@@ -120,13 +124,15 @@ def sanitize_open(filename, open_mode):
         stream = open(filename, open_mode)
         return (stream, filename)

+
 def timeconvert(timestr):
     """Convert RFC 2822 defined time string into system timestamp"""
     timestamp = None
     timetuple = email.utils.parsedate_tz(timestr)
     if timetuple is not None:
         timestamp = email.utils.mktime_tz(timetuple)
     return timestamp

+
 class DownloadError(Exception):
     """Download Error exception.
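As a reference for the timeconvert helper in this hunk, the same email.utils calls can be exercised directly; a minimal sketch (the date string is only an example):

    import email.utils

    timestr = 'Sat, 01 Jan 2011 00:00:00 +0000'
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)  # 1293840000, seconds since the epoch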
@@ -137,6 +143,7 @@ class DownloadError(Exception):
     """
     pass

+
 class SameFileError(Exception):
     """Same File exception.

@@ -145,6 +152,7 @@ class SameFileError(Exception):
     """
     pass

+
 class PostProcessingError(Exception):
     """Post Processing exception.

@@ -153,6 +161,7 @@ class PostProcessingError(Exception):
     """
     pass

+
 class UnavailableVideoError(Exception):
     """Unavailable Format exception.

@@ -161,6 +170,7 @@ class UnavailableVideoError(Exception):
     """
     pass

+
 class ContentTooShortError(Exception):
     """Content Too Short exception.

@@ -176,6 +186,7 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected

+
 class YoutubeDLHandler(urllib2.HTTPHandler):
     """Handler for HTTP requests and responses.

@@ -185,11 +196,11 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
     a particular request, the original request in the program code only has
     to include the HTTP header "Youtubedl-No-Compression", which will be
     removed before making the real request.

     Part of this code was copied from:

     http://techknack.net/python-urllib2-handlers/

     Andrew Rowls, the author of that code, agreed to release it to the
     public domain.
     """
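A sketch of how a handler like this is typically installed with Python 2's urllib2, and of the opt-out header the docstring describes (the URL is a placeholder; not part of the diff):

    import urllib2

    opener = urllib2.build_opener(YoutubeDLHandler())
    urllib2.install_opener(opener)

    # Per the docstring, a request opts out of gzip/deflate by carrying this
    # marker header; the handler strips it before the real request is made.
    request = urllib2.Request('http://example.com/video')
    request.add_header('Youtubedl-No-Compression', 'True')
    data = urllib2.urlopen(request).read()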
@@ -200,7 +211,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
             return zlib.decompress(data, -zlib.MAX_WBITS)
         except zlib.error:
             return zlib.decompress(data)

     @staticmethod
     def addinfourl_wrapper(stream, headers, url, code):
         if hasattr(urllib2.addinfourl, 'getcode'):
@@ -208,7 +219,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
         ret = urllib2.addinfourl(stream, headers, url)
         ret.code = code
         return ret

     def http_request(self, req):
         for h in std_headers:
             if h in req.headers:
@@ -234,6 +245,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
         resp.msg = old_resp.msg
         return resp

+
 class FileDownloader(object):
     """File Downloader class.

@@ -325,7 +337,7 @@ class FileDownloader(object):
         else:
             exponent = long(math.log(bytes, 1024.0))
         suffix = 'bkMGTPEZY'[exponent]
-        converted = float(bytes) / float(1024**exponent)
+        converted = float(bytes) / float(1024 ** exponent)
         return '%.2f%s' % (converted, suffix)

     @staticmethod
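A worked example of the reformatted expression (spacing only, behaviour unchanged); int() stands in here for Python 2's long():

    import math

    bytes = 2500
    exponent = int(math.log(bytes, 1024.0))             # 1, since 1024 <= 2500 < 1024 ** 2
    suffix = 'bkMGTPEZY'[exponent]                      # 'k'
    converted = float(bytes) / float(1024 ** exponent)  # about 2.44
    print('%.2f%s' % (converted, suffix))               # 2.44k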
@@ -463,7 +475,7 @@ class FileDownloader(object):
             os.rename(old_filename, new_filename)
         except (IOError, OSError), err:
             self.trouble(u'ERROR: unable to rename file')

     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
         if last_modified_hdr is None:
@@ -477,7 +489,7 @@ class FileDownloader(object):
         if filetime is None:
             return
         try:
-            os.utime(filename,(time.time(), filetime))
+            os.utime(filename, (time.time(), filetime))
         except:
             pass

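For context, the call fixed here is how try_utime copies a server's Last-Modified value onto the finished file; a small sketch with a hypothetical filename:

    import os
    import time

    filetime = timeconvert('Sat, 01 Jan 2011 00:00:00 +0000')  # helper defined earlier in the file
    if filetime is not None:
        os.utime('video.flv', (time.time(), filetime))  # (access time, modification time)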
@@ -680,7 +692,7 @@ class FileDownloader(object):
        # Request parameters in case of being able to resume
         if self.params.get('continuedl', False) and resume_len != 0:
             self.report_resuming_byte(resume_len)
-            request.add_header('Range','bytes=%d-' % resume_len)
+            request.add_header('Range', 'bytes=%d-' % resume_len)
             open_mode = 'ab'

         count = 0
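The resume logic around the fixed add_header call boils down to: if a partial file already exists, request only the remaining bytes and append to it. A minimal sketch (filename and URL are hypothetical):

    import os
    import urllib2

    filename = 'video.mp4.part'
    resume_len = os.path.getsize(filename) if os.path.exists(filename) else 0

    request = urllib2.Request('http://example.com/video.mp4')
    open_mode = 'wb'
    if resume_len != 0:
        request.add_header('Range', 'bytes=%d-' % resume_len)
        open_mode = 'ab'  # append to the existing partial file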
@@ -706,7 +718,7 @@ class FileDownloader(object):
            else:
                 # Examine the reported length
                 if (content_length is not None and
                         (resume_len - 100 < long(content_length) < resume_len + 100)):
                     # The file had already been fully downloaded.
                     # Explanation to the above condition: in issue #175 it was revealed that
                     # YouTube sometimes adds or removes a few bytes from the end of the file,
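The condition in this hunk reads as a small predicate: the download is treated as already complete when the reported Content-Length is within 100 bytes of what is on disk, per the issue #175 comment. A sketch, with int() in place of Python 2's long():

    def looks_fully_downloaded(resume_len, content_length):
        if content_length is None:
            return False
        return resume_len - 100 < int(content_length) < resume_len + 100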
@@ -784,6 +796,7 @@ class FileDownloader(object):

         return True

+
 class InfoExtractor(object):
     """Information Extractor class.

@@ -855,6 +868,7 @@ class InfoExtractor(object):
         """Real extraction process. Redefine in subclasses."""
         pass

+
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""

@@ -1009,7 +1023,7 @@ class YoutubeIE(InfoExtractor):
         self.report_video_info_webpage_download(video_id)
         for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
             video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                     % (video_id, el_type))
             request = urllib2.Request(video_info_url)
             try:
                 video_info_webpage = urllib2.urlopen(request).read()
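For illustration, the loop above produces get_video_info URLs of the following shape (the video id and el_type values here are made up):

    video_id = 'abcdefghijk'
    el_type = '&el=detailpage'
    video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
            % (video_id, el_type))
    # http://www.youtube.com/get_video_info?&video_id=abcdefghijk&el=detailpage&ps=default&eurl=&gl=US&hl=en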
@@ -1371,6 +1385,7 @@ class DailymotionIE(InfoExtractor):
         except UnavailableVideoError:
             self._downloader.trouble(u'\nERROR: unable to download video')

+
 class GoogleIE(InfoExtractor):
     """Information extractor for video.google.com."""

@@ -1464,7 +1479,6 @@ class GoogleIE(InfoExtractor):
         else:   # we need something to pass to process_info
             video_thumbnail = ''

-
         try:
             # Process video information
             self._downloader.process_info({
@@ -1664,7 +1678,8 @@ class YahooIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video description')
             return
         video_description = mobj.group(1).decode('utf-8')
-        if not video_description: video_description = 'No description available.'
+        if not video_description:
+            video_description = 'No description available.'

         # Extract video height and width
         mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
@@ -1685,8 +1700,8 @@ class YahooIE(InfoExtractor):
         yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
         yv_bitrate = '700' # according to Wikipedia this is hard-coded
         request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
                 '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
                 '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
         try:
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
@@ -1779,11 +1794,11 @@ class GenericIE(InfoExtractor):
             return

         video_url = urllib.unquote(mobj.group(1))
         video_id = os.path.basename(video_url)

         # here's a fun little line of code for you:
         video_extension = os.path.splitext(video_id)[1][1:]
         video_id = os.path.splitext(video_id)[0]

         # it's tempting to parse this further, but you would
         # have to take into account all the variations like
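To make the splitext gymnastics in this hunk concrete, this is what they yield for a hypothetical URL:

    import os.path
    import urllib

    video_url = urllib.unquote('http://example.com/media/some%20clip.flv')  # hypothetical
    video_id = os.path.basename(video_url)               # 'some clip.flv'
    video_extension = os.path.splitext(video_id)[1][1:]  # 'flv'
    video_id = os.path.splitext(video_id)[0]             # 'some clip'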
@@ -1856,7 +1871,7 @@ class YoutubeSearchIE(InfoExtractor):

         prefix, query = query.split(':')
         prefix = prefix[8:]
         query = query.encode('utf-8')
         if prefix == '':
             self._download_n_results(query, 1)
             return
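The prefix handling above strips the literal 'ytsearch' (8 characters) and keeps whatever follows as the requested result count; for example:

    query = 'ytsearch5:piano tutorial'  # hypothetical query
    prefix, query = query.split(':')    # 'ytsearch5', 'piano tutorial'
    prefix = prefix[8:]                 # '5', since len('ytsearch') == 8
    query = query.encode('utf-8')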
@@ -1870,7 +1885,7 @@ class YoutubeSearchIE(InfoExtractor):
                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_youtube_results:
                     self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
                     n = self._max_youtube_results
                 self._download_n_results(query, n)
                 return
@@ -1914,6 +1929,7 @@ class YoutubeSearchIE(InfoExtractor):

             pagenum = pagenum + 1

+
 class GoogleSearchIE(InfoExtractor):
     """Information Extractor for Google Video search queries."""
     _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
@@ -1947,7 +1963,7 @@ class GoogleSearchIE(InfoExtractor):

         prefix, query = query.split(':')
         prefix = prefix[8:]
         query = query.encode('utf-8')
         if prefix == '':
             self._download_n_results(query, 1)
             return
@@ -1961,7 +1977,7 @@ class GoogleSearchIE(InfoExtractor):
                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_google_results:
                     self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
                     n = self._max_google_results
                 self._download_n_results(query, n)
                 return
@@ -2005,6 +2021,7 @@ class GoogleSearchIE(InfoExtractor):

             pagenum = pagenum + 1

+
 class YahooSearchIE(InfoExtractor):
     """Information Extractor for Yahoo! Video search queries."""
     _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
@@ -2038,7 +2055,7 @@ class YahooSearchIE(InfoExtractor):

         prefix, query = query.split(':')
         prefix = prefix[8:]
         query = query.encode('utf-8')
         if prefix == '':
             self._download_n_results(query, 1)
             return
@@ -2052,7 +2069,7 @@ class YahooSearchIE(InfoExtractor):
                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_yahoo_results:
                     self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
                     n = self._max_yahoo_results
                 self._download_n_results(query, n)
                 return
@@ -2096,6 +2113,7 @@ class YahooSearchIE(InfoExtractor):

             pagenum = pagenum + 1

+
 class YoutubePlaylistIE(InfoExtractor):
     """Information Extractor for YouTube playlists."""

@@ -2172,6 +2190,7 @@ class YoutubePlaylistIE(InfoExtractor):
             self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
         return

+
 class YoutubeUserIE(InfoExtractor):
     """Information Extractor for YouTube users."""

@@ -2193,7 +2212,7 @@ class YoutubeUserIE(InfoExtractor):
     def report_download_page(self, username, start_index):
         """Report attempt to download user page."""
         self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
                 (username, start_index, start_index + self._GDATA_PAGE_SIZE))

     def _real_initialize(self):
         self._youtube_ie.initialize()
@@ -2255,9 +2274,9 @@ class YoutubeUserIE(InfoExtractor):
                 video_ids = video_ids[playliststart:]
             else:
                 video_ids = video_ids[playliststart:playlistend]

         self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
                 (username, all_ids_count, len(video_ids)))

         for video_id in video_ids:
             self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
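The slicing in this hunk selects the requested window of ids; for hypothetical values playliststart == 2 and playlistend == 5:

    video_ids = ['a1', 'b2', 'c3', 'd4', 'e5', 'f6']
    print(video_ids[2:])   # ['c3', 'd4', 'e5', 'f6']  (no upper bound)
    print(video_ids[2:5])  # ['c3', 'd4', 'e5']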
@@ -2342,6 +2361,7 @@ class DepositFilesIE(InfoExtractor):
         except UnavailableVideoError, err:
             self._downloader.trouble(u'ERROR: unable to download file')

+
 class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""

@@ -2510,7 +2530,7 @@ class FacebookIE(InfoExtractor):
         # description
         video_description = 'No description available.'
         if (self._downloader.params.get('forcedescription', False) and
                 'description' in video_info):
             video_description = video_info['description']

         url_map = video_info['video_urls']
@@ -2565,6 +2585,7 @@ class FacebookIE(InfoExtractor):
         except UnavailableVideoError, err:
             self._downloader.trouble(u'\nERROR: unable to download video')

+
 class PostProcessor(object):
     """Post Processor class.

@@ -2611,6 +2632,7 @@ class PostProcessor(object):
         """
         return information # by default, do nothing

+
 class FFmpegExtractAudioPP(PostProcessor):

     def __init__(self, downloader=None, preferredcodec=None):